//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In, SDValue ChainIn)
    : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
      Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // Chain if this is a strict floating-point comparison.
  SDValue Chain;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value. Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (!useSoftFloat()) {
    if (Subtarget.hasVector()) {
      addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
      addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
    } else {
      addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
      addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
    }
    if (Subtarget.hasVectorEnhancements1())
      addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
    else
      addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

    if (Subtarget.hasVector()) {
      addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(Align(2));
  // For performance reasons we prefer 16-byte alignment.
  setPrefFunctionAlignment(Align(16));

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Support addition/subtraction with overflow.
      setOperationAction(ISD::SADDO, VT, Custom);
      setOperationAction(ISD::SSUBO, VT, Custom);

      // Support addition/subtraction with carry.
      setOperationAction(ISD::UADDO, VT, Custom);
      setOperationAction(ISD::USUBO, VT, Custom);

      // Support carry in as value rather than glue.
      setOperationAction(ISD::ADDCARRY, VT, Custom);
      setOperationAction(ISD::SUBCARRY, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);

      // Mirror those settings for STRICT_FP_TO_[SU]INT. Note that these all
      // default to Expand, so need to be modified to Legal where appropriate.
      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Legal);
      if (Subtarget.hasFPExtension())
        setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Legal);

      // And similarly for STRICT_[SU]INT_TO_FP.
      setOperationAction(ISD::STRICT_SINT_TO_FP, VT, Legal);
      if (Subtarget.hasFPExtension())
        setOperationAction(ISD::STRICT_UINT_TO_FP, VT, Legal);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);

  // Even though i128 is not a legal type, we still need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // On z15 we have native support for a 64-bit CTPOP.
  if (Subtarget.hasMiscellaneousExtensions3()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Promote);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  }

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such. In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner. ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f64, Legal);
  }

  if (Subtarget.hasVectorEnhancements2()) {
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4f32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4f32, Legal);

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f32, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);

      // Handle constrained floating-point operations.
      setOperationAction(ISD::STRICT_FADD, VT, Legal);
      setOperationAction(ISD::STRICT_FSUB, VT, Legal);
      setOperationAction(ISD::STRICT_FMUL, VT, Legal);
      setOperationAction(ISD::STRICT_FDIV, VT, Legal);
      setOperationAction(ISD::STRICT_FMA, VT, Legal);
      setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
      setOperationAction(ISD::STRICT_FRINT, VT, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
        setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
        setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
        setOperationAction(ISD::STRICT_FROUND, VT, Legal);
        setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
      }
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
    for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                     MVT::v4f32, MVT::v2f64 }) {
      setOperationAction(ISD::STRICT_FMAXNUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMAXIMUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMINIMUM, VT, Legal);
    }
  }

  // We only have fused f128 multiply-addition on vector registers.
  if (!Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FMA, MVT::f128, Expand);
    setOperationAction(ISD::STRICT_FMA, MVT::f128, Expand);
  }

  // We don't have a copysign instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have extending load instructions on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::STRICT_FP_ROUND);
  setTargetDAGCombine(ISD::FP_EXTEND);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::STRICT_FP_EXTEND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SDIV);
  setTargetDAGCombine(ISD::UDIV);
  setTargetDAGCombine(ISD::SREM);
  setTargetDAGCombine(ISD::UREM);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC". Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;

  // Default to having -disable-strictnode-mutation on.
  IsStrictFPEnabled = true;
}

bool SystemZTargetLowering::useSoftFloat() const {
  return Subtarget.hasSoftFloat();
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(
    const MachineFunction &MF, EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

// Return true if the constant can be generated with a vector instruction,
// such as VGM, VGMB or VREPI.
bool SystemZVectorConstantInfo::isVectorConstantLegal(
    const SystemZSubtarget &Subtarget) {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
    return false;

  // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
  // preferred way of creating all-zero and all-one vectors so give it
  // priority over other methods below.
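  // For example (illustrative), a 128-bit IntBits value whose bytes 2 and 3
  // (counting from the least-significant byte) are 0xff and whose remaining
  // bytes are zero yields Mask == (1 << 2) | (1 << 3) == 0xc in the loop
  // below.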
  unsigned Mask = 0;
  unsigned I = 0;
  for (; I < SystemZ::VectorBytes; ++I) {
    uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
    if (Byte == 0xff)
      Mask |= 1ULL << I;
    else if (Byte != 0)
      break;
  }
  if (I == SystemZ::VectorBytes) {
    Opcode = SystemZISD::BYTE_MASK;
    OpVals.push_back(Mask);
    VecVT = MVT::getVectorVT(MVT::getIntegerVT(8), 16);
    return true;
  }

  if (SplatBitSize > 64)
    return false;

  auto tryValue = [&](uint64_t Value) -> bool {
    // Try VECTOR REPLICATE IMMEDIATE
    int64_t SignedValue = SignExtend64(Value, SplatBitSize);
    if (isInt<16>(SignedValue)) {
      OpVals.push_back(((unsigned) SignedValue));
      Opcode = SystemZISD::REPLICATE;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    // Try VECTOR GENERATE MASK
    unsigned Start, End;
    if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
      // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
      // denoting 1 << 63 and 63 denoting 1. Convert them to bit numbers for
      // a SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
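      // E.g. with SplatBitSize == 32 (illustrative), 64-bit bit numbers
      // Start == 40 and End == 47 become bits 8 and 15 of the 32-bit
      // element.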
      OpVals.push_back(Start - (64 - SplatBitSize));
      OpVals.push_back(End - (64 - SplatBitSize));
      Opcode = SystemZISD::ROTATE_MASK;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    return false;
  };

  // First try assuming that any undefined bits above the highest set bit
  // and below the lowest set bit are 1s. This increases the likelihood of
  // being able to use a sign-extended element value in VECTOR REPLICATE
  // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
  uint64_t SplatBitsZ = SplatBits.getZExtValue();
  uint64_t SplatUndefZ = SplatUndef.getZExtValue();
  uint64_t Lower =
      (SplatUndefZ & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
  uint64_t Upper =
      (SplatUndefZ & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
  if (tryValue(SplatBitsZ | Upper | Lower))
    return true;

  // Now try assuming that any undefined bits between the first and
  // last defined set bits are set. This increases the chances of
  // using a non-wraparound mask.
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(APFloat FPImm) {
  IntBits = FPImm.bitcastToAPInt().zextOrSelf(128);
  isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
  SplatBits = FPImm.bitcastToAPInt();
  unsigned Width = SplatBits.getBitWidth();
  IntBits <<= (SystemZ::VectorBits - Width);

  // Find the smallest splat.
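  // For example (illustrative), the 64-bit pattern 0xAAAAAAAAAAAAAAAA has
  // matching 32-, 16- and 8-bit halves, so the loop below shrinks it to an
  // 8-bit splat of 0xAA.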
  while (Width > 8) {
    unsigned HalfSize = Width / 2;
    APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatBits.trunc(HalfSize);

    // If the two halves do not match, stop here.
    if (HighValue != LowValue || 8 > HalfSize)
      break;

    SplatBits = HighValue;
    Width = HalfSize;
  }
  SplatUndef = 0;
  SplatBitSize = Width;
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(BuildVectorSDNode *BVN) {
  assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
  bool HasAnyUndefs;

  // Get IntBits by finding the 128 bit splat.
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
                       true);

  // Get SplatBits by finding the 8 bit or greater splat.
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                       true);
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool ForCodeSize) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  if (Imm.isZero() || Imm.isNegZero())
    return true;

  return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
}

/// Returns true if stack probing through inline assembly is requested.
bool SystemZTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";
  return false;
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};

// Return the desired addressing mode for a Load which has only one use (in
// the same block) which is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support a Load->Store combination may be combined to either
  // an MVC or vector operations and it seems to work best to allow the
  // vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
            // Comparison of memory with 16 bit signed / unsigned immediate
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        // Load->Store
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        // Load->Store
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE type
    // instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined into a
    // VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  AddressingMode SupportedAM(true, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;
  else
    // Indexing is OK but no scale factor can be applied.
    return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedSize();
  unsigned ToBits = ToType->getPrimitiveSizeInBits().getFixedSize();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getFixedSizeInBits();
  unsigned ToBits = ToVT.getFixedSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
    case 'v': // Vector register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Immediate;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'v': // Vector register
    if ((type->isVectorTy() || type->isFloatingPointTy()) &&
        Subtarget.hasVector())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified. RC is the register class associated with "t"
// and Map maps 0-based register numbers to LLVM register numbers.
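// For example (illustrative), "{r5}" parsed against SystemZMC::GR64Regs
// would yield the pair (SystemZ::R5D, &SystemZ::GR64BitRegClass).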
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < Size && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (!useSoftFloat()) {
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::FP64BitRegClass);
        else if (VT == MVT::f128)
          return std::make_pair(0U, &SystemZ::FP128BitRegClass);
        return std::make_pair(0U, &SystemZ::FP32BitRegClass);
      }
      break;
    case 'v': // Vector register
      if (Subtarget.hasVector()) {
        if (VT == MVT::f32)
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
      }
      break;
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT. The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs, 16);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs, 16);
    }
    if (Constraint[1] == 'f') {
      if (useSoftFloat())
        return std::make_pair(
            0u, static_cast<const TargetRegisterClass *>(nullptr));
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs, 16);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs, 16);
    }
    if (Constraint[1] == 'v') {
      if (!Subtarget.hasVector())
        return std::make_pair(
            0u, static_cast<const TargetRegisterClass *>(nullptr));
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
                                   SystemZMC::VR32Regs, 32);
      if (VT == MVT::f64)
        return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
                                   SystemZMC::VR64Regs, 32);
      return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
                                 SystemZMC::VR128Regs, 32);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SystemZTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                                  const MachineFunction &MF) const {

  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r15", SystemZ::R15D)
                     .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
    CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                           SystemZ::R14D, 0 };
  return ScratchRegs;
}

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types. If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}
1308
1309 // Value is a value that has been passed to us in the location described by VA
1310 // (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
1311 // any loads onto Chain.
convertLocVTToValVT(SelectionDAG & DAG,const SDLoc & DL,CCValAssign & VA,SDValue Chain,SDValue Value)1312 static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
1313 CCValAssign &VA, SDValue Chain,
1314 SDValue Value) {
1315 // If the argument has been promoted from a smaller type, insert an
1316 // assertion to capture this.
1317 if (VA.getLocInfo() == CCValAssign::SExt)
1318 Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
1319 DAG.getValueType(VA.getValVT()));
1320 else if (VA.getLocInfo() == CCValAssign::ZExt)
1321 Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
1322 DAG.getValueType(VA.getValVT()));
1323
1324 if (VA.isExtInLoc())
1325 Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
1326 else if (VA.getLocInfo() == CCValAssign::BCvt) {
1327 // If this is a short vector argument loaded from the stack,
1328 // extend from i64 to full vector size and then bitcast.
1329 assert(VA.getLocVT() == MVT::i64);
1330 assert(VA.getValVT().isVector());
1331 Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
1332 Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
1333 } else
1334 assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
1335 return Value;
1336 }
1337
1338 // Value is a value of type VA.getValVT() that we need to copy into
1339 // the location described by VA. Return a copy of Value converted to
1340 // VA.getValVT(). The caller is responsible for handling indirect values.
convertValVTToLocVT(SelectionDAG & DAG,const SDLoc & DL,CCValAssign & VA,SDValue Value)1341 static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
1342 CCValAssign &VA, SDValue Value) {
1343 switch (VA.getLocInfo()) {
1344 case CCValAssign::SExt:
1345 return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
1346 case CCValAssign::ZExt:
1347 return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
1348 case CCValAssign::AExt:
1349 return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
1350 case CCValAssign::BCvt:
1351 // If this is a short vector argument to be stored to the stack,
1352 // bitcast to v2i64 and then extract first element.
1353 assert(VA.getLocVT() == MVT::i64);
1354 assert(VA.getValVT().isVector());
1355 Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
1356 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
1357 DAG.getConstant(0, DL, MVT::i32));
1358 case CCValAssign::Full:
1359 return Value;
1360 default:
1361 llvm_unreachable("Unhandled getLocInfo()");
1362 }
1363 }

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      Register VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter. Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert(Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be. The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset =
      -SystemZMC::CallFrameSize + TFL->getRegSpillOffset(MF, SystemZ::R2D) - 16;
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots. (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs && !useSoftFloat()) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(MF, SystemZ::ArgFPRs[I]);
        int FI =
          MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs - NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    Register Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      assert(Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot. Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes. Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Special case that we cannot easily detect in RetCC_SystemZ since
  // i128 is not a legal type.
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
      return false;

  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns.
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  if (CallConv == CallingConv::GHC)
    report_fatal_error("GHC functions return void only");

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    Register Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) argument. Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
                                      unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_tbegin:
    Opcode = SystemZISD::TBEGIN;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tbegin_nofloat:
    Opcode = SystemZISD::TBEGIN_NOFLOAT;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tend:
    Opcode = SystemZISD::TEND;
    CCValid = SystemZ::CCMASK_TEND;
    return true;

  default:
    return false;
  }
}

// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final argument. Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
    Opcode = SystemZISD::PACKS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    Opcode = SystemZISD::PACKLS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
    Opcode = SystemZISD::VICMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
    Opcode = SystemZISD::VICMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
    Opcode = SystemZISD::VICMPHLS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vtm:
    Opcode = SystemZISD::VTM;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
    Opcode = SystemZISD::VFAE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
    Opcode = SystemZISD::VFAEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
    Opcode = SystemZISD::VFEE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
    Opcode = SystemZISD::VFEEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
    Opcode = SystemZISD::VFENE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
    Opcode = SystemZISD::VFENEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
    Opcode = SystemZISD::VISTR_CC;
    CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
    return true;

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
    Opcode = SystemZISD::VSTRC_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
    Opcode = SystemZISD::VSTRCZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:
    Opcode = SystemZISD::VSTRS_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:
    Opcode = SystemZISD::VSTRSZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:
    Opcode = SystemZISD::VFCMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:
    Opcode = SystemZISD::VFCMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:
    Opcode = SystemZISD::VFCMPHES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:
    Opcode = SystemZISD::VFTCI;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_tdc:
    Opcode = SystemZISD::TDC;
    CCValid = SystemZ::CCMASK_TDC;
    return true;

  default:
    return false;
  }
}

// Emit an intrinsic with chain and an explicit CC register result.
static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op,
                                           unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  Ops.push_back(Op.getOperand(0));
  for (unsigned I = 2; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
  SDValue OldChain = SDValue(Op.getNode(), 1);
  SDValue NewChain = SDValue(Intr.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
  return Intr.getNode();
}

// Emit an intrinsic with an explicit CC register result.
static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op,
                                   unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  for (unsigned I = 1; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops);
  return Intr.getNode();
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison. Return the condition code mask for
// a branch on true. In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones. In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO: return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}
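
// For example, CCMaskForCondCode(ISD::SETUGT) returns
// CCMASK_CMP_UO | CCMASK_CMP_GT: taken as an integer condition this just
// requests an unsigned greater-than comparison, while as a floating-point
// condition it also accepts the unordered result, matching SETUGT.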

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
  }
}
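
// For example, a signed (x > -1) has Value == -1 and CCMASK_CMP_GT;
// XOR-ing in CCMASK_CMP_EQ turns this into (x >= 0) against a zero
// constant, which lets isel use a compare-with-zero or LOAD AND TEST.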

// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
                             Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!C.Op0.hasOneUse() ||
      C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Value = ConstOp1->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    // Make sure that ConstOp1 is in range of C.Op0.
    int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
    if (C.ICmpType != SystemZICMP::SignedOnly) {
      // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
      else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // If the constant is in range, we can use any comparison.
    C.ICmpType = SystemZICMP::Any;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
    C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
                           Load->getBasePtr(), Load->getPointerInfo(),
                           Load->getMemoryVT(), Load->getAlignment(),
                           Load->getMemOperand()->getFlags());
    // Update the chain uses.
    DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1));
  }

  // Make sure that the second operand is an i32 with the right value.
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
    C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
}
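
// For example, a signed (sextloadi8 < 0) test has no direct subword
// instruction, but the byte is negative exactly when its unsigned value
// is greater than 127, so the code above rewrites the comparison as an
// unsigned test against 127 with CCMASK_CMP_GT, which CLI can handle.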

// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}

// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (C.Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(C.Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;

  // Also keep natural memory operands second if the loaded value is
  // only used here. Several comparisons have memory forms.
  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
    return false;

  // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
  // In that case we generally prefer the memory to be second.
  if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (C.ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(ConstOp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }

  // Try to promote the use of CGFR and CLGFR.
  unsigned Opcode0 = C.Op0.getOpcode();
  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly &&
      Opcode0 == ISD::AND &&
      C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}

// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed. In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
                                 Comparison &C) {
  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
      C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
          ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
           (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
        C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
        return;
      }
    }
  }
}
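
// For example, in a function that computes both (x - y) and (x == y), the
// equality test becomes ((x - y) == 0), which can later be folded into the
// CC value that the subtraction itself produces.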

// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated. In this case we can use the
// negation to set CC, so avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
  // This optimization is invalid for strict comparisons, since FNEG
  // does not raise any exceptions.
  if (C.Chain)
    return;
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
        C.Op0 = SDValue(N, 0);
        C.CCMask = SystemZ::reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}
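
// For example, if a function tests (x < 0.0) and also computes -x, the
// comparison is redirected at the FNEG with a reversed mask: (-x > 0.0)
// is the same condition, and the LOAD COMPLEMENT producing -x sets CC
// as a side effect.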

// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended. In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
  // Check for a comparison between (shl X, 32) and 0.
  if (C.Op0.getOpcode() == ISD::SHL &&
      C.Op0.getValueType() == MVT::i64 &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
      // See whether X has any SIGN_EXTEND_INREG uses.
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}
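
// For example, if the DAG contains both (sign_extend_inreg X, i32) and a
// test of (shl X, 32) against 0, the test is redirected at the
// sign-extension result: it is zero, negative or positive exactly when
// (shl X, 32) is, and LTGFR computes it while setting CC.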

// If C compares the truncation of an extending load, try to compare
// the untruncated value instead. This exposes more opportunities to
// reuse CC.
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
                               Comparison &C) {
  if (C.Op0.getOpcode() == ISD::TRUNCATE &&
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedSize() <=
        C.Op0.getValueSizeInBits().getFixedSize()) {
      unsigned Type = L->getExtensionType();
      if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
          (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
        C.Op0 = C.Op0.getOperand(0);
        C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
      }
    }
  }
}
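
// For example, for (trunc (zextload i16) to i32) compared with 0, the
// truncation cannot drop any of the 16 loaded bits, so the untruncated
// load result can be compared with 0 directly and the CC it sets reused.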

// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}

// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type Opcode between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands. If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}
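
// For example, getTestUnderMaskCond(64, CCMASK_CMP_NE, 0x8000, 0, Any)
// returns CCMASK_TM_SOME_1: ((x & 0x8000) != 0) holds exactly when the
// single masked bit is 1, which TMLL tests directly.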

// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
                                   Comparison &C) {
  // Check that we have a comparison with a constant.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1)
    return;
  uint64_t CmpVal = ConstOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = nullptr;
  if (C.Op0.getOpcode() == ISD::AND) {
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
    // There is no instruction to compare with a 64-bit immediate
    // so use TMHH instead if possible. We need an unsigned ordered
    // comparison with an i64 immediate.
    if (NewC.Op0.getValueType() != MVT::i64 ||
        NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
        NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
        NewC.ICmpType == SystemZICMP::SignedOnly)
      return;
    // Convert LE and GT comparisons into LT and GE.
    if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
        NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
      if (CmpVal == uint64_t(-1))
        return;
      CmpVal += 1;
      NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    }
    // If the low N bits of Op1 are zero then the low N bits of Op0 can
    // be masked off without changing the result.
    MaskVal = -(CmpVal & -CmpVal);
    NewC.ICmpType = SystemZICMP::UnsignedOnly;
  }
  if (!MaskVal)
    return;

  // Check whether the combination of mask, comparison value and comparison
  // type are suitable.
  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
  if (NewC.ICmpType != SystemZICMP::SignedOnly &&
      NewC.Op0.getOpcode() == ISD::SHL &&
      isSimpleShift(NewC.Op0, ShiftVal) &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
      (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                        MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
  } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
             NewC.Op0.getOpcode() == ISD::SRL &&
             isSimpleShift(NewC.Op0, ShiftVal) &&
             (MaskVal << ShiftVal != 0) &&
             ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
             (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  } else {
    NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
                                     NewC.ICmpType);
    if (!NewCCMask)
      return;
  }

  // Go ahead and make the change.
  C.Opcode = SystemZISD::TM;
  C.Op0 = NewC.Op0;
  if (Mask && Mask->getZExtValue() == MaskVal)
    C.Op1 = SDValue(Mask, 0);
  else
    C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
  C.CCValid = SystemZ::CCMASK_TM;
  C.CCMask = NewCCMask;
}

// See whether the comparison argument contains a redundant AND
// and remove it if so. This sometimes happens due to the generic
// BRCOND expansion.
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
                                  Comparison &C) {
  if (C.Op0.getOpcode() != ISD::AND)
    return;
  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask)
    return;
  KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0));
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
    return;

  C.Op0 = C.Op0.getOperand(0);
}
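
// For example, the generic BRCOND expansion can produce
// (and (setcc ...), 1). The setcc result is known to be 0 or 1, so the
// mask covers every bit that could be nonzero and the AND is dropped.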

// Return a Comparison that tests the condition-code result of intrinsic
// node Call against constant integer CC using comparison code Cond.
// Opcode is the opcode of the SystemZISD operation for the intrinsic
// and CCValid is the set of possible condition-code results.
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
                                  SDValue Call, unsigned CCValid, uint64_t CC,
                                  ISD::CondCode Cond) {
  Comparison C(Call, SDValue(), SDValue());
  C.Opcode = Opcode;
  C.CCValid = CCValid;
  if (Cond == ISD::SETEQ)
    // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
  else if (Cond == ISD::SETNE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
  else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
    // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
  else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
  else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
    // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
  else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  else
    llvm_unreachable("Unexpected integer comparison type");
  C.CCMask &= CCValid;
  return C;
}
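
// For example, testing the result of tbegin() for equality with 1 gives
// C.CCMask = 1 << (3 - 1), i.e. SystemZ::CCMASK_1, the branch-mask bit
// that corresponds to CC value 1.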

// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                         ISD::CondCode Cond, const SDLoc &DL,
                         SDValue Chain = SDValue(),
                         bool IsSignaling = false) {
  if (CmpOp1.getOpcode() == ISD::Constant) {
    assert(!Chain);
    uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
    unsigned Opcode, CCValid;
    if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
        isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
    if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
        isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
  }
  Comparison C(CmpOp0, CmpOp1, Chain);
  C.CCMask = CCMaskForCondCode(Cond);
  if (C.Op0.getValueType().isFloatingPoint()) {
    C.CCValid = SystemZ::CCMASK_FCMP;
    if (!C.Chain)
      C.Opcode = SystemZISD::FCMP;
    else if (!IsSignaling)
      C.Opcode = SystemZISD::STRICT_FCMP;
    else
      C.Opcode = SystemZISD::STRICT_FCMPS;
    adjustForFNeg(C);
  } else {
    assert(!C.Chain);
    C.CCValid = SystemZ::CCMASK_ICMP;
    C.Opcode = SystemZISD::ICMP;
    // Choose the type of comparison. Equality and inequality tests can
    // use either signed or unsigned comparisons. The choice also doesn't
    // matter if both sign bits are known to be clear. In those cases we
    // want to give the main isel code the freedom to choose whichever
    // form fits best.
    if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
        C.CCMask == SystemZ::CCMASK_CMP_NE ||
        (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
      C.ICmpType = SystemZICMP::Any;
    else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
      C.ICmpType = SystemZICMP::UnsignedOnly;
    else
      C.ICmpType = SystemZICMP::SignedOnly;
    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
    adjustForRedundantAnd(DAG, DL, C);
    adjustZeroCmp(DAG, DL, C);
    adjustSubwordCmp(DAG, DL, C);
    adjustForSubtraction(DAG, DL, C);
    adjustForLTGFR(C);
    adjustICmpTruncate(DAG, DL, C);
  }

  if (shouldSwapCmpOperands(C)) {
    std::swap(C.Op0, C.Op1);
    C.CCMask = SystemZ::reverseCCMask(C.CCMask);
  }

  adjustForTestUnderMask(DAG, DL, C);
  return C;
}

// Emit the comparison instruction described by C.
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (!C.Op1.getNode()) {
    SDNode *Node;
    switch (C.Op0.getOpcode()) {
    case ISD::INTRINSIC_W_CHAIN:
      Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode);
      return SDValue(Node, 0);
    case ISD::INTRINSIC_WO_CHAIN:
      Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode);
      return SDValue(Node, Node->getNumValues() - 1);
    default:
      llvm_unreachable("Invalid comparison operands");
    }
  }
  if (C.Opcode == SystemZISD::ICMP)
    return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1,
                       DAG.getTargetConstant(C.ICmpType, DL, MVT::i32));
  if (C.Opcode == SystemZISD::TM) {
    bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
                         bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
    return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
                       DAG.getTargetConstant(RegisterOnly, DL, MVT::i32));
  }
  if (C.Chain) {
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  }
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
}

// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits. Extend is the extension type to use. Store the high part
// in Hi and the low part in Lo.
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
                            SDValue Op0, SDValue Op1, SDValue &Hi,
                            SDValue &Lo) {
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
                   DAG.getConstant(32, DL, MVT::i64));
  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}
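
// As an illustration, for a 32-bit UMUL_LOHI (with ISD::ZERO_EXTEND passed
// as Extend) this builds:
//
//   %prod = (mul i64 (zext %op0), (zext %op1))
//   %hi   = (trunc i32 (srl %prod, 32))
//   %lo   = (trunc i32 %prod)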

// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
// and Opcode performs the GR128 operation. Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                             unsigned Opcode, SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
  bool Is32Bit = is32Bit(VT);
  Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
  Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}

// Return an i32 value that is 1 if the CC value produced by CCReg is
// in the mask CCMask and 0 otherwise. CC is known to have a value
// in CCValid, so other values can be ignored.
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
                         unsigned CCValid, unsigned CCMask) {
  SDValue Ops[] = {DAG.getConstant(1, DL, MVT::i32),
                   DAG.getConstant(0, DL, MVT::i32),
                   DAG.getTargetConstant(CCValid, DL, MVT::i32),
                   DAG.getTargetConstant(CCMask, DL, MVT::i32), CCReg};
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
}
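
// For example, with CCValid == CCMASK_ICMP and CCMask == CCMASK_CMP_EQ,
// this builds a SELECT_CCMASK between the constants 1 and 0, yielding 1
// exactly when the integer comparison set CC 0; the pseudo is expanded
// into real instructions later.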

// Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
// be done directly. Mode is CmpMode::Int for integer comparisons, CmpMode::FP
// for regular floating-point comparisons, CmpMode::StrictFP for strict (quiet)
// floating-point comparisons, and CmpMode::SignalingFP for strict signaling
// floating-point comparisons.
enum class CmpMode { Int, FP, StrictFP, SignalingFP };
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode) {
  switch (CC) {
  case ISD::SETOEQ:
  case ISD::SETEQ:
    switch (Mode) {
    case CmpMode::Int: return SystemZISD::VICMPE;
    case CmpMode::FP: return SystemZISD::VFCMPE;
    case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPE;
    case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPES;
    }
    llvm_unreachable("Bad mode");

  case ISD::SETOGE:
  case ISD::SETGE:
    switch (Mode) {
    case CmpMode::Int: return 0;
    case CmpMode::FP: return SystemZISD::VFCMPHE;
    case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPHE;
    case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPHES;
    }
    llvm_unreachable("Bad mode");

  case ISD::SETOGT:
  case ISD::SETGT:
    switch (Mode) {
    case CmpMode::Int: return SystemZISD::VICMPH;
    case CmpMode::FP: return SystemZISD::VFCMPH;
    case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPH;
    case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPHS;
    }
    llvm_unreachable("Bad mode");

  case ISD::SETUGT:
    switch (Mode) {
    case CmpMode::Int: return SystemZISD::VICMPHL;
    case CmpMode::FP: return 0;
    case CmpMode::StrictFP: return 0;
    case CmpMode::SignalingFP: return 0;
    }
    llvm_unreachable("Bad mode");

  default:
    return 0;
  }
}

// Return the SystemZISD vector comparison operation for CC or its inverse,
// or 0 if neither can be done directly. Indicate in Invert whether the
// result is for the inverse of CC. Mode is as above.
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode,
                                            bool &Invert) {
  if (unsigned Opcode = getVectorComparison(CC, Mode)) {
    Invert = false;
    return Opcode;
  }

  CC = ISD::getSetCCInverse(CC, Mode == CmpMode::Int ? MVT::i32 : MVT::f32);
  if (unsigned Opcode = getVectorComparison(CC, Mode)) {
    Invert = true;
    return Opcode;
  }

  return 0;
}
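
// For example, there is no integer "compare less or equal" vector
// instruction: for ISD::SETLE in CmpMode::Int the inverse condition
// ISD::SETGT maps to VICMPH, so VICMPH is returned with Invert set and
// the caller must negate the resulting mask.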

// Return a v2f64 that contains the extended form of elements Start and Start+1
// of v4f32 value Op. If Chain is nonnull, return the strict form.
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
                                  SDValue Op, SDValue Chain) {
  int Mask[] = { Start, -1, Start + 1, -1 };
  Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
  if (Chain) {
    SDVTList VTs = DAG.getVTList(MVT::v2f64, MVT::Other);
    return DAG.getNode(SystemZISD::STRICT_VEXTEND, DL, VTs, Chain, Op);
  }
  return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
}
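
// For example, Start == 0 shuffles Op to <elt0, undef, elt1, undef>, so
// the (strict) VEXTEND, which widens the even-numbered f32 elements,
// produces a v2f64 holding elements 0 and 1; Start == 2 handles
// elements 2 and 3 the same way.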
2773
2774 // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
2775 // producing a result of type VT. If Chain is nonnull, return the strict form.
getVectorCmp(SelectionDAG & DAG,unsigned Opcode,const SDLoc & DL,EVT VT,SDValue CmpOp0,SDValue CmpOp1,SDValue Chain) const2776 SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
2777 const SDLoc &DL, EVT VT,
2778 SDValue CmpOp0,
2779 SDValue CmpOp1,
2780 SDValue Chain) const {
2781 // There is no hardware support for v4f32 (unless we have the vector
2782 // enhancements facility 1), so extend the vector into two v2f64s
2783 // and compare those.
2784 if (CmpOp0.getValueType() == MVT::v4f32 &&
2785 !Subtarget.hasVectorEnhancements1()) {
2786 SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0, Chain);
2787 SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0, Chain);
2788 SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1, Chain);
2789 SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1, Chain);
2790 if (Chain) {
2791 SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::Other);
2792 SDValue HRes = DAG.getNode(Opcode, DL, VTs, Chain, H0, H1);
2793 SDValue LRes = DAG.getNode(Opcode, DL, VTs, Chain, L0, L1);
2794 SDValue Res = DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
2795 SDValue Chains[6] = { H0.getValue(1), L0.getValue(1),
2796 H1.getValue(1), L1.getValue(1),
2797 HRes.getValue(1), LRes.getValue(1) };
2798 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2799 SDValue Ops[2] = { Res, NewChain };
2800 return DAG.getMergeValues(Ops, DL);
2801 }
2802 SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
2803 SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
2804 return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
2805 }
2806 if (Chain) {
2807 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
2808 return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
2809 }
2810 return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
2811 }
2812
2813 // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
2814 // an integer mask of type VT. If Chain is nonnull, we have a strict
2815 // floating-point comparison. If in addition IsSignaling is true, we have
2816 // a strict signaling floating-point comparison.
lowerVectorSETCC(SelectionDAG & DAG,const SDLoc & DL,EVT VT,ISD::CondCode CC,SDValue CmpOp0,SDValue CmpOp1,SDValue Chain,bool IsSignaling) const2817 SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
2818 const SDLoc &DL, EVT VT,
2819 ISD::CondCode CC,
2820 SDValue CmpOp0,
2821 SDValue CmpOp1,
2822 SDValue Chain,
2823 bool IsSignaling) const {
2824 bool IsFP = CmpOp0.getValueType().isFloatingPoint();
2825 assert (!Chain || IsFP);
2826 assert (!IsSignaling || Chain);
2827 CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
2828 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
2829 bool Invert = false;
2830 SDValue Cmp;
2831 switch (CC) {
2832 // Handle tests for order using (or (ogt y x) (oge x y)).
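    // (The OR is true iff the operands are ordered: any non-NaN pair
    // satisfies either y > x or x >= y. SETUO is simply the inverse.)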
  case ISD::SETUO:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETO: {
    assert(IsFP && "Unexpected integer comparison");
    SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
                              DL, VT, CmpOp1, CmpOp0, Chain);
    SDValue GE = getVectorCmp(DAG, getVectorComparison(ISD::SETOGE, Mode),
                              DL, VT, CmpOp0, CmpOp1, Chain);
    Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          LT.getValue(1), GE.getValue(1));
    break;
  }

    // Handle <> tests using (or (ogt y x) (ogt x y)).
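    // (SETONE is true iff the operands are ordered and different; SETUEQ,
    // unordered-or-equal, is its inverse.)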
  case ISD::SETUEQ:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETONE: {
    assert(IsFP && "Unexpected integer comparison");
    SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
                              DL, VT, CmpOp1, CmpOp0, Chain);
    SDValue GT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
                              DL, VT, CmpOp0, CmpOp1, Chain);
    Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          LT.getValue(1), GT.getValue(1));
    break;
  }

    // Otherwise a single comparison is enough. It doesn't really
    // matter whether we try the inversion or the swap first, since
    // there are no cases where both work.
  default:
    if (unsigned Opcode = getVectorComparisonOrInvert(CC, Mode, Invert))
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
    else {
      CC = ISD::getSetCCSwappedOperands(CC);
      if (unsigned Opcode = getVectorComparisonOrInvert(CC, Mode, Invert))
        Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);
      else
        llvm_unreachable("Unhandled comparison");
    }
    if (Chain)
      Chain = Cmp.getValue(1);
    break;
  }
  if (Invert) {
    SDValue Mask =
        DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64));
    Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
  }
  if (Chain && Chain.getNode() != Cmp.getNode()) {
    SDValue Ops[2] = { Cmp, Chain };
    Cmp = DAG.getMergeValues(Ops, DL);
  }
  return Cmp;
}

SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  SDValue CCReg = emitCmp(DAG, DL, C);
  return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
}

SDValue SystemZTargetLowering::lowerSTRICT_FSETCC(SDValue Op,
                                                  SelectionDAG &DAG,
                                                  bool IsSignaling) const {
  SDValue Chain = Op.getOperand(0);
  SDValue CmpOp0 = Op.getOperand(1);
  SDValue CmpOp1 = Op.getOperand(2);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
  SDLoc DL(Op);
  EVT VT = Op.getNode()->getValueType(0);
  if (VT.isVector()) {
    SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                   Chain, IsSignaling);
    return Res.getValue(Op.getResNo());
  }

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL, Chain, IsSignaling));
  SDValue CCReg = emitCmp(DAG, DL, C);
  CCReg->setFlags(Op->getFlags());
  SDValue Result = emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
  SDValue Ops[2] = { Result, CCReg.getValue(1) };
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0 = Op.getOperand(2);
  SDValue CmpOp1 = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  SDValue CCReg = emitCmp(DAG, DL, C);
  return DAG.getNode(
      SystemZISD::BR_CCMASK, DL, Op.getValueType(), Op.getOperand(0),
      DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
      DAG.getTargetConstant(C.CCMask, DL, MVT::i32), Dest, CCReg);
}

// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
// allowing Pos and Neg to be wider than CmpOp.
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
  return (Neg.getOpcode() == ISD::SUB &&
          Neg.getOperand(0).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
          Neg.getOperand(1) == Pos &&
          (Pos == CmpOp ||
           (Pos.getOpcode() == ISD::SIGN_EXTEND &&
            Pos.getOperand(0) == CmpOp)));
}

// Return the absolute or negative absolute of Op; IsNegative decides which.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
                           bool IsNegative) {
  Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
  if (IsNegative)
    Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
                     DAG.getConstant(0, DL, Op.getValueType()), Op);
  return Op;
}

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  SDValue TrueOp = Op.getOperand(2);
  SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

  // Check for absolute and negative-absolute selections, including those
  // where the comparison value is sign-extended (for LPGFR and LNGFR).
  // This check supplements the one in DAGCombiner.
  if (C.Opcode == SystemZISD::ICMP &&
      C.CCMask != SystemZ::CCMASK_CMP_EQ &&
      C.CCMask != SystemZ::CCMASK_CMP_NE &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    if (isAbsolute(C.Op0, TrueOp, FalseOp))
      return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
    if (isAbsolute(C.Op0, FalseOp, TrueOp))
      return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
  }

  SDValue CCReg = emitCmp(DAG, DL, C);
  SDValue Ops[] = {TrueOp, FalseOp,
                   DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
                   DAG.getTargetConstant(C.CCMask, DL, MVT::i32), CCReg};

  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops);
}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  CodeModel::Model CM = DAG.getTarget().getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, CM)) {
    if (isInt<32>(Offset)) {
      // Assign anchors at 1<<12 byte boundaries.
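      // E.g. an offset of 0x12344 uses the anchor 0x12000; the remaining
      // 0x344 is halfword-aligned and is folded below, whereas an odd
      // remainder such as 0x345 needs the explicit ADD at the end.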
      uint64_t Anchor = Offset & ~uint64_t(0xfff);
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
      Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);

      // The offset can be folded into the address if it is aligned to a
      // halfword.
      Offset -= Anchor;
      if (Offset != 0 && (Offset & 1) == 0) {
        SDValue Full =
            DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
        Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
        Offset = 0;
      }
    } else {
      // Conservatively load a constant offset greater than 32 bits into a
      // register below.
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT);
      Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    }
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, DL, PtrVT));

  return Result;
}

SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
                                                 SelectionDAG &DAG,
                                                 unsigned Opcode,
                                                 SDValue GOTOffset) const {
  SDLoc DL(Node);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Chain = DAG.getEntryNode();
  SDValue Glue;

  if (DAG.getMachineFunction().getFunction().getCallingConv() ==
      CallingConv::GHC)
    report_fatal_error("In GHC calling convention TLS is not supported");

  // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
  Glue = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
  Glue = Chain.getValue(1);

  // The first call operand is the chain and the second is the TLS symbol.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
                                           Node->getValueType(0),
                                           0, 0));

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
  Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies.
  Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Copy the return value from %r2.
  return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
}

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, DL, PtrVT));
  return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(Node, DAG);
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  TLSModel::Model model = DAG.getTarget().getTLSModel(GV);

  if (DAG.getMachineFunction().getFunction().getCallingConv() ==
      CallingConv::GHC)
    report_fatal_error("In GHC calling convention TLS is not supported");

  SDValue TP = lowerThreadPointer(DL, DAG);

  // Get the offset of GA from the thread pointer, based on the TLS model.
  SDValue Offset;
  switch (model) {
  case TLSModel::GeneralDynamic: {
    // Load the GOT offset of the tls_index (module ID / per-symbol offset).
    SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);

    Offset = DAG.getConstantPool(CPV, PtrVT, Align(8));
    Offset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    // Call __tls_get_offset to retrieve the offset.
    Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
    break;
  }

  case TLSModel::LocalDynamic: {
    // Load the GOT offset of the module ID.
    SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);

    Offset = DAG.getConstantPool(CPV, PtrVT, Align(8));
    Offset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    // Call __tls_get_offset to retrieve the module base offset.
    Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);

    // Note: The SystemZLDCleanupPass will remove redundant computations
    // of the module base offset. Count total number of local-dynamic
    // accesses to trigger execution of that pass.
    SystemZMachineFunctionInfo *MFI =
        DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
    MFI->incNumLocalDynamicTLSAccesses();

    // Add the per-symbol offset.
    CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);

    SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, Align(8));
    DTPOffset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), DTPOffset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
    break;
  }

  case TLSModel::InitialExec: {
    // Load the offset from the GOT.
    Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                        SystemZII::MO_INDNTPOFF);
    Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
    Offset =
        DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
                    MachinePointerInfo::getGOT(DAG.getMachineFunction()));
    break;
  }

  case TLSModel::LocalExec: {
    // Force the offset into the constant pool and load it from there.
    SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

    Offset = DAG.getConstantPool(CPV, PtrVT, Align(8));
    Offset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    break;
  }
  }

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result =
        DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
                                       CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Return null if the back chain is not present.
  bool HasBackChain = MF.getFunction().hasFnAttribute("backchain");
  if (TFL->usePackedStack(MF) && !HasBackChain)
    return DAG.getConstant(0, DL, PtrVT);

  // By definition, the frame address is the address of the back chain.
  int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
  SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);

  // FIXME The frontend should detect this case.
  if (Depth > 0) {
    report_fatal_error("Unsupported stack frame traversal count");
  }

  return BackChain;
}

SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
                                               SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // FIXME The frontend should detect this case.
  if (Depth > 0) {
    report_fatal_error("Unsupported stack frame traversal count");
  }

  // Return R14D, which has the return address. Mark it an implicit live-in.
  unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
}

SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  // Convert loads directly. This is normally done by DAGCombiner,
  // but we need this case for bitcasts that are created during lowering
  // and which are then lowered themselves.
  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
    if (ISD::isNormalLoad(LoadN)) {
      SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(),
                                    LoadN->getBasePtr(), LoadN->getMemOperand());
      // Update the chain uses.
      DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1));
      return NewLoad;
    }

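  // An f32 value occupies the high (leftmost) 32 bits of its 64-bit FP
  // register, which is why both directions below go through the high word
  // (subreg_h32) of an i64/f64 rather than the low word.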
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64;
    if (Subtarget.hasHighWord()) {
      SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
                                       MVT::i64);
      In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                       MVT::i64, SDValue(U64, 0), In);
    } else {
      In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
      In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
                         DAG.getConstant(32, DL, MVT::i64));
    }
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
    return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
                                      DL, MVT::f32, Out64);
  }
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                             MVT::f64, SDValue(U64, 0), In);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
    if (Subtarget.hasHighWord())
      return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
                                        MVT::i32, Out64);
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
                                DAG.getConstant(32, DL, MVT::i64));
    return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
  }
  llvm_unreachable("Unexpected bitcast combination");
}

SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };

  // Store each field into its respective slot.
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset));
    Offset += 8;
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue DstPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
                       Align(8), /*isVolatile*/ false, /*AlwaysInline*/ false,
                       /*isTailCall*/ false, MachinePointerInfo(DstSV),
                       MachinePointerInfo(SrcSV));
}

SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  MachineFunction &MF = DAG.getMachineFunction();
  bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc DL(Op);

  // If user has set the no alignment function attribute, ignore
  // alloca alignments.
  uint64_t AlignVal = (RealignOpt ?
                       cast<ConstantSDNode>(Align)->getZExtValue() : 0);

  uint64_t StackAlign = TFI->getStackAlignment();
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

  Register SPReg = getStackPointerRegisterToSaveRestore();
  SDValue NeededSpace = Size;

  // Get a reference to the stack pointer.
  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);

  // If we need a backchain, save it now.
  SDValue Backchain;
  if (StoreBackchain)
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());

  // Add extra space for alignment if needed.
  if (ExtraAlignSpace)
    NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
                              DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));

  // Get the new stack pointer value.
  SDValue NewSP;
  if (hasInlineStackProbe(MF)) {
    NewSP = DAG.getNode(SystemZISD::PROBED_ALLOCA, DL,
                        DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP,
                        NeededSpace);
    Chain = NewSP.getValue(1);
  } else {
    NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
    // Copy the new stack pointer back.
    Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
  }

  // The allocated data lives above the 160 bytes allocated for the standard
  // frame, plus any outgoing stack arguments. We don't know how much that
  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);

  // Dynamically realign if needed.
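  // E.g. with a 16-byte alloca alignment on an 8-byte-aligned stack,
  // ExtraAlignSpace is 8: adding it and masking with ~15 rounds Result up
  // to the next 16-byte boundary within the over-allocated block.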
  if (RequiredAlign > StackAlign) {
    Result =
        DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
                    DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
    Result =
        DAG.getNode(ISD::AND, DL, MVT::i64, Result,
                    DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
  }

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());

  SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);

  return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
}

SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
    // SystemZISD::SMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  else {
    // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
    //
    //   (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
    //
    // but using the fact that the upper halves are either all zeros
    // or all ones:
    //
    //   (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
    //
    // and grouping the right terms together since they are quicker than the
    // multiplication:
    //
    //   (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
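    //
    // The second step is valid because lh and rh are each either 0 or -1
    // (the sign of the corresponding operand), so modulo 2^64 the product
    // (lh * rl) equals -(lh & rl), and likewise for (ll * rh).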
    SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
    SDValue LL = Op.getOperand(0);
    SDValue RL = Op.getOperand(1);
    SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
    SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     LL, RL, Ops[1], Ops[0]);
    SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
    SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
    SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
    Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
  }
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::UMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // We use DSGF for 32-bit division. This means the first operand must
  // always be 64-bit, and the second operand should be 32-bit whenever
  // that is possible, to improve performance.
  if (is32Bit(VT))
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
  else if (DAG.ComputeNumSignBits(Op1) > 32)
    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);

  // DSG(F) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // DL(G) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  // Get the known-zero masks for each operand.
  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
  KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]),
                        DAG.computeKnownBits(Ops[1])};

  // See if the upper 32 bits of one operand and the lower 32 bits of the
  // other are known zero. They are the low and high operands respectively.
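  // E.g. in (or (shl X, 32), (zext Y)) the shift's low 32 bits and the
  // zero extension's high 32 bits are all known zero.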
  uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
                       Known[1].Zero.getZExtValue() };
  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;

  SDValue LowOp = Ops[Low];
  SDValue HighOp = Ops[High];

  // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;

  // If the low part is a constant that is outside the range of LHI,
  // then we're better off using IILF.
  if (LowOp.getOpcode() == ISD::Constant) {
    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }

  // Check whether the high part is an AND that doesn't change the
  // high 32 bits and just masks out low bits. We can skip it if so.
  if (HighOp.getOpcode() == ISD::AND &&
      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue HighOp0 = HighOp.getOperand(0);
    uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
    if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
      HighOp = HighOp0;
  }

  // Take advantage of the fact that all GR32 operations only change the
  // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg. The interesting cases are those where the truncation
  // can be folded.
  SDLoc DL(Op);
  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
  return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
                                   MVT::i64, HighOp, Low32);
}

// Lower SADDO/SSUBO/UADDO/USUBO nodes.
SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::SADDO:
    BaseOp = SystemZISD::SADDO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::SSUBO:
    BaseOp = SystemZISD::SSUBO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::UADDO:
    BaseOp = SystemZISD::UADDO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::USUBO:
    BaseOp = SystemZISD::USUBO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}

static bool isAddCarryChain(SDValue Carry) {
  while (Carry.getOpcode() == ISD::ADDCARRY)
    Carry = Carry.getOperand(2);
  return Carry.getOpcode() == ISD::UADDO;
}

static bool isSubBorrowChain(SDValue Carry) {
  while (Carry.getOpcode() == ISD::SUBCARRY)
    Carry = Carry.getOperand(2);
  return Carry.getOpcode() == ISD::USUBO;
}

// Lower ADDCARRY/SUBCARRY nodes.
SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
                                                SelectionDAG &DAG) const {

  SDNode *N = Op.getNode();
  MVT VT = N->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::ADDCARRY:
    if (!isAddCarryChain(Carry))
      return SDValue();

    BaseOp = SystemZISD::ADDCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::SUBCARRY:
    if (!isSubBorrowChain(Carry))
      return SDValue();

    BaseOp = SystemZISD::SUBCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  // Set the condition code from the carry flag.
  Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
                      DAG.getConstant(CCValid, DL, MVT::i32),
                      DAG.getConstant(CCMask, DL, MVT::i32));

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}

SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
                                          SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  Op = Op.getOperand(0);

  // Handle vector types via VPOPCT.
  if (VT.isVector()) {
    Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
    Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
    switch (VT.getScalarSizeInBits()) {
    case 8:
      break;
    case 16: {
      Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
      SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
      SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
      Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
      break;
    }
    case 32: {
      SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
                                            DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    case 64: {
      SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
                                            DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    default:
      llvm_unreachable("Unexpected type");
    }
    return Op;
  }

  // Get the known-zero mask for the operand.
  KnownBits Known = DAG.computeKnownBits(Op);
  unsigned NumSignificantBits = Known.getMaxValue().getActiveBits();
  if (NumSignificantBits == 0)
    return DAG.getConstant(0, DL, VT);

  // Skip known-zero high parts of the operand.
  int64_t OrigBitSize = VT.getSizeInBits();
  int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
  BitSize = std::min(BitSize, OrigBitSize);

  // The POPCNT instruction counts the number of bits in each byte.
  Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
  Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
  Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);

  // Add up per-byte counts in a binary tree. All bits of Op at
  // position larger than BitSize remain zero throughout.
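  // E.g. for a full i64 the loop shifts by 32, 16 and 8; after the last
  // ADD the byte at bits 56-63 holds the total, which the SRL below
  // brings down to the low byte.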
  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
    if (BitSize != OrigBitSize)
      Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
                        DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
    Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
  }

  // Extract overall result from high byte.
  if (BitSize > 8)
    Op = DAG.getNode(ISD::SRL, DL, VT, Op,
                     DAG.getConstant(BitSize - 8, DL, VT));

  return Op;
}

SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
      cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
      cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
      FenceSSID == SyncScope::System) {
    return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
                                      Op.getOperand(0)),
                   0);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

// Op is an atomic load. Lower it into a normal volatile load.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
                                                SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
                        Node->getChain(), Node->getBasePtr(),
                        Node->getMemoryVT(), Node->getMemOperand());
}

// Op is an atomic store. Lower it into a normal volatile store.
SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
                                    Node->getBasePtr(), Node->getMemoryVT(),
                                    Node->getMemOperand());
  // We have to enforce sequential consistency by performing a
  // serialization operation after the store.
  if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent)
    Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
                                       MVT::Other, Chain), 0);
  return Chain;
}
// Op is an 8-bit, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the
// 8-bit and 16-bit forms into the fullword ATOMIC_LOADW_* operation given
// by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  // 32-bit operations need no code outside the main loop.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getChain();
  SDValue Addr = Node->getBasePtr();
  SDValue Src2 = Node->getVal();
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Convert atomic subtracts of constants into additions.
  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
    if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
      Opcode = SystemZISD::ATOMIC_LOADW_ADD;
      Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
    }

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
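  // (Only the low bits of Addr * 8 matter to the rotate; e.g. a halfword
  // at byte offset 2 within its word is rotated left by 16 bits.)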
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Extend the source operand to 32 bits and prepare it for the inner loop.
  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
  // operations require the source to be shifted in advance. (This shift
  // can be folded if the source is constant.) For AND and NAND, the lower
  // bits must be set, while for other opcodes they should be left clear.
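  // E.g. for an 8-bit AND, the operand ends up in the top byte and is
  // ORed with 0x00ffffff, so the CS loop leaves the three bytes outside
  // the field unchanged.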
  if (Opcode != SystemZISD::ATOMIC_SWAPW)
    Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
                       DAG.getConstant(32 - BitSize, DL, WideVT));
  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
      Opcode == SystemZISD::ATOMIC_LOADW_NAND)
    Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
                       DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));

  // Construct the ATOMIC_LOADW_* node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
                    DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
                                             NarrowVT, MMO);

  // Rotate the result of the final CS so that the field is in the lower
  // bits of a GR32, then truncate it.
  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
                                    DAG.getConstant(BitSize, DL, WideVT));
  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);

  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
  return DAG.getMergeValues(RetOps, DL);
}

// Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
// operations into additions.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
    // A full-width operation.
    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    SDValue Src2 = Node->getVal();
    SDValue NegSrc2;
    SDLoc DL(Src2);

    if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
      // Use an addition if the operand is constant and either LAA(G) is
      // available or the negative value is in the range of A(G)FHI.
      int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
      if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
        NegSrc2 = DAG.getConstant(Value, DL, MemVT);
    } else if (Subtarget.hasInterlockedAccess1())
      // Use LAA(G) if available.
      NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
                            Src2);

    if (NegSrc2.getNode())
      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
                           Node->getChain(), Node->getBasePtr(), NegSrc2,
                           Node->getMemOperand());

    // Use the node as-is.
    return Op;
  }

  return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
}

// Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue ChainIn = Node->getOperand(0);
  SDValue Addr = Node->getOperand(1);
  SDValue CmpVal = Node->getOperand(2);
  SDValue SwapVal = Node->getOperand(3);
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);

  // We have native support for 32-bit and 64-bit compare and swap, but we
  // still need to expand extracting the "success" result from the CC.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {
    SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
    SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP,
                                               DL, Tys, Ops, NarrowVT, MMO);
    SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);

    DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
    return SDValue();
  }

  // Convert 8-bit and 16-bit compare and swap to a loop, implemented
  // via a fullword ATOMIC_CMP_SWAPW operation.
  int64_t BitSize = NarrowVT.getSizeInBits();
  EVT PtrVT = Addr.getValueType();

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Construct the ATOMIC_CMP_SWAPW node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                    NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
                                             VTList, Ops, NarrowVT, MMO);
  SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
                              SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ);

  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
  return SDValue();
}

MachineMemOperand::Flags
SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const {
  // Because of how we convert atomic_load and atomic_store to normal loads
  // and stores in the DAG, we need to ensure that the MMOs are marked
  // volatile, since DAGCombine hasn't been updated to account for atomic,
  // but non-volatile, loads. (See D57601.)
  if (auto *SI = dyn_cast<StoreInst>(&I))
    if (SI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *LI = dyn_cast<LoadInst>(&I))
    if (LI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  return MachineMemOperand::MONone;
}

SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    report_fatal_error("Variable-sized stack allocations are not supported "
                       "in GHC calling convention");
  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
                            SystemZ::R15D, Op.getValueType());
}

SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    report_fatal_error("Variable-sized stack allocations are not supported "
                       "in GHC calling convention");

  SDValue Chain = Op.getOperand(0);
  SDValue NewSP = Op.getOperand(1);
  SDValue Backchain;
  SDLoc DL(Op);

  if (StoreBackchain) {
    SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
  }

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());

  return Chain;
}

SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (!IsData)
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc DL(Op);
  bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
  auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
  SDValue Ops[] = {Op.getOperand(0), DAG.getTargetConstant(Code, DL, MVT::i32),
                   Op.getOperand(1)};
  return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
                                 Node->getVTList(), Ops,
                                 Node->getMemoryVT(), Node->getMemOperand());
}

// Convert condition code in CCReg to an i32 value.
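// (IPM inserts the 2-bit condition code at bit position SystemZ::IPM_CC of
// the result, so the logical shift right below leaves the raw CC value,
// 0 to 3, in the low bits.)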
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
  SDLoc DL(CCReg);
  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
  return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
                     DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
}

SDValue
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                              SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
    SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode);
    SDValue CC = getCCResult(DAG, SDValue(Node, 0));
    DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
    return SDValue();
  }

  return SDValue();
}

SDValue
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                               SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
    SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode);
    if (Op->getNumValues() == 1)
      return getCCResult(DAG, SDValue(Node, 0));
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
    return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(),
                       SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1)));
  }

  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);

  case Intrinsic::s390_vpdi:
    return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vperm:
    return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
    return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
    return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
    return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }

  return SDValue();
}

namespace {
// Says that SystemZISD operation Opcode can be used to perform the equivalent
// of a VPERM with permute vector Bytes.  If Opcode takes three operands,
// Operand is the constant third operand, otherwise it is the number of
// bytes in each element of the result.
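// Byte indices 0-15 select bytes of the first operand and 16-31 select
// bytes of the second, as in VPERM itself.  For example, the VMRHG entry
// below lists bytes 0-7 of operand 0 followed by bytes 0-7 of operand 1
// (indices 16-23), which is exactly what a doubleword merge high produces.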
struct Permute {
  unsigned Opcode;
  unsigned Operand;
  unsigned char Bytes[SystemZ::VectorBytes];
};
} // end anonymous namespace

static const Permute PermuteForms[] = {
  // VMRHG
  { SystemZISD::MERGE_HIGH, 8,
    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VMRHF
  { SystemZISD::MERGE_HIGH, 4,
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
  // VMRHH
  { SystemZISD::MERGE_HIGH, 2,
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
  // VMRHB
  { SystemZISD::MERGE_HIGH, 1,
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
  // VMRLG
  { SystemZISD::MERGE_LOW, 8,
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
  // VMRLF
  { SystemZISD::MERGE_LOW, 4,
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
  // VMRLH
  { SystemZISD::MERGE_LOW, 2,
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
  // VMRLB
  { SystemZISD::MERGE_LOW, 1,
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
  // VPKG
  { SystemZISD::PACK, 4,
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
  // VPKF
  { SystemZISD::PACK, 2,
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
  // VPKH
  { SystemZISD::PACK, 1,
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
  // VPDI V1, V2, 4  (low half of V1, high half of V2)
  { SystemZISD::PERMUTE_DWORDS, 4,
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VPDI V1, V2, 1  (high half of V1, low half of V2)
  { SystemZISD::PERMUTE_DWORDS, 1,
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
};

// Called after matching a vector shuffle against a particular pattern.
// Both the original shuffle and the pattern have two vector operands.
// OpNos[0] is the operand of the original shuffle that should be used for
// operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
// OpNos[1] is the same for operand 1 of the pattern.  Resolve these -1s and
// set OpNo0 and OpNo1 to the shuffle operands that should actually be used
// for operands 0 and 1 of the pattern.
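// For example (illustrative): OpNos == { -1, 1 } means only pattern operand 1
// is constrained, so both OpNo0 and OpNo1 resolve to shuffle operand 1;
// OpNos == { -1, -1 } means no byte was defined at all and matching fails.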
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
  if (OpNos[0] < 0) {
    if (OpNos[1] < 0)
      return false;
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];
  } else {
    OpNo0 = OpNos[0];
    OpNo1 = OpNos[1];
  }
  return true;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Return true if the VPERM can be implemented using P.
// When returning true set OpNo0 to the VPERM operand that should be
// used for operand 0 of P and likewise OpNo1 for operand 1 of P.
//
// For example, if swapping the VPERM operands allows P to match, OpNo0
// will be 1 and OpNo1 will be 0.  If instead Bytes only refers to one
// operand, but rewriting it to use two duplicated operands allows it to
// match P, then OpNo0 and OpNo1 will be the same.
static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
                         unsigned &OpNo0, unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
    int Elt = Bytes[I];
    if (Elt >= 0) {
      // Make sure that the two permute vectors use the same suboperand
      // byte number.  Only the operand numbers (the high bits) are
      // allowed to differ.
      if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
        return false;
      int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
      int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
      // Make sure that the operand mappings are consistent with previous
      // elements.
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
        return false;
      OpNos[ModelOpNo] = RealOpNo;
    }
  }
  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}

// As above, but search for a matching permute.
static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
                                   unsigned &OpNo0, unsigned &OpNo1) {
  for (auto &P : PermuteForms)
    if (matchPermute(Bytes, P, OpNo0, OpNo1))
      return &P;
  return nullptr;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  This permute is an operand of an outer permute.
// See whether redistributing the -1 bytes gives a shuffle that can be
// implemented using P.  If so, set Transform to a VPERM-like permute vector
// that, when applied to the result of P, gives the original permute in Bytes.
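// For example (illustrative): a <2 x i16> padded with undefs during type
// legalization might ask for { 0, 1, -1, -1, 2, 3, -1, -1, ... }.  That is
// not a merge by itself, but the VMRHH form { 0, 1, 16, 17, 2, 3, 18, 19,
// ... } produces every defined byte, and Transform then simply selects
// bytes { 0, 1, -1, -1, 4, 5, -1, -1, ... } from the merge result.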
static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                               const Permute &P,
                               SmallVectorImpl<int> &Transform) {
  unsigned To = 0;
  for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
    int Elt = Bytes[From];
    if (Elt < 0)
      // Byte number From of the result is undefined.
      Transform[From] = -1;
    else {
      while (P.Bytes[To] != Elt) {
        To += 1;
        if (To == SystemZ::VectorBytes)
          return false;
      }
      Transform[From] = To;
    }
  }
  return true;
}

// As above, but search for a matching permute.
static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                                         SmallVectorImpl<int> &Transform) {
  for (auto &P : PermuteForms)
    if (matchDoublePermute(Bytes, P, Transform))
      return &P;
  return nullptr;
}

// Convert the mask of the given shuffle op into a byte-level mask,
// as if it had type vNi8.
static bool getVPermMask(SDValue ShuffleOp,
                         SmallVectorImpl<int> &Bytes) {
  EVT VT = ShuffleOp.getValueType();
  unsigned NumElements = VT.getVectorNumElements();
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I) {
      int Index = VSN->getMaskElt(I);
      if (Index >= 0)
        for (unsigned J = 0; J < BytesPerElement; ++J)
          Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
    }
    return true;
  }
  if (ShuffleOp.getOpcode() == SystemZISD::SPLAT &&
      isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
    unsigned Index = ShuffleOp.getConstantOperandVal(1);
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I)
      for (unsigned J = 0; J < BytesPerElement; ++J)
        Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
    return true;
  }
  return false;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  See whether bytes [Start, Start + BytesPerElement) of
// the result come from a contiguous sequence of bytes from one input.
// Set Base to the selector for the first byte if so.
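// For example (illustrative): with BytesPerElement == 4, the bytes
// { 20, 21, 22, 23 } yield Base == 20, whereas { 14, 15, 16, 17 } are
// rejected because the run would straddle the boundary between the two
// 16-byte inputs.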
static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
                            unsigned BytesPerElement, int &Base) {
  Base = -1;
  for (unsigned I = 0; I < BytesPerElement; ++I) {
    if (Bytes[Start + I] >= 0) {
      unsigned Elem = Bytes[Start + I];
      if (Base < 0) {
        Base = Elem - I;
        // Make sure the bytes would come from one input operand.
        if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
          return false;
      } else if (unsigned(Base) != Elem - I)
        return false;
    }
  }
  return true;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Return true if it can be performed using VSLDB.
// When returning true, set StartIndex to the shift amount and OpNo0
// and OpNo1 to the VPERM operands that should be used as the first
// and second shift operand respectively.
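// For example (illustrative): VSLDB concatenates its two operands and
// extracts 16 contiguous bytes starting at the shift amount, so the permute
// vector { 1, 2, ..., 15, 16 } matches with StartIndex == 1: bytes 1-15
// come from the first shift operand and byte 16 (byte 0 of the second
// operand) supplies the final element.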
static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
                               unsigned &StartIndex, unsigned &OpNo0,
                               unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
  int Shift = -1;
  for (unsigned I = 0; I < 16; ++I) {
    int Index = Bytes[I];
    if (Index >= 0) {
      int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
      int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
      int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
      if (Shift < 0)
        Shift = ExpectedShift;
      else if (Shift != ExpectedShift)
        return false;
      // Make sure that the operand mappings are consistent with previous
      // elements.
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
        return false;
      OpNos[ModelOpNo] = RealOpNo;
    }
  }
  StartIndex = Shift;
  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}

// Create a node that performs P on operands Op0 and Op1, casting the
// operands to the appropriate type.  The type of the result is determined
// by P.
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                              const Permute &P, SDValue Op0, SDValue Op1) {
  // VPDI (PERMUTE_DWORDS) always operates on v2i64s.  The input
  // elements of a PACK are twice as wide as the outputs.
  unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
                      P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
                      P.Operand);
  // Cast both operands to the appropriate type.
  MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
                              SystemZ::VectorBytes / InBytes);
  Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
  Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
  SDValue Op;
  if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
    SDValue Op2 = DAG.getTargetConstant(P.Operand, DL, MVT::i32);
    Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
  } else if (P.Opcode == SystemZISD::PACK) {
    MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
                                 SystemZ::VectorBytes / P.Operand);
    Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
  } else {
    Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
  }
  return Op;
}

static bool isZeroVector(SDValue N) {
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0);
  if (N->getOpcode() == ISD::SPLAT_VECTOR)
    if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0)))
      return Op->getZExtValue() == 0;
  return ISD::isBuildVectorAllZeros(N.getNode());
}

// Return the index of the zero/undef vector, or UINT32_MAX if not found.
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num) {
  for (unsigned I = 0; I < Num; I++)
    if (isZeroVector(Ops[I]))
      return I;
  return UINT32_MAX;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Implement it on operands Ops[0] and Ops[1] using
// VSLDB or VPERM.
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                                     SDValue *Ops,
                                     const SmallVectorImpl<int> &Bytes) {
  for (unsigned I = 0; I < 2; ++I)
    Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);

  // First see whether VSLDB can be used.
  unsigned StartIndex, OpNo0, OpNo1;
  if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
    return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
                       Ops[OpNo1],
                       DAG.getTargetConstant(StartIndex, DL, MVT::i32));

  // Fall back on VPERM.  Construct an SDNode for the permute vector.  Try to
  // eliminate a zero vector by reusing any zero index in the permute vector.
  unsigned ZeroVecIdx = findZeroVectorIdx(&Ops[0], 2);
  if (ZeroVecIdx != UINT32_MAX) {
    bool MaskFirst = true;
    int ZeroIdx = -1;
    for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
      unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes;
      unsigned Byte = unsigned(Bytes[I]) % SystemZ::VectorBytes;
      if (OpNo == ZeroVecIdx && I == 0) {
        // If the first byte is zero, use mask as first operand.
        ZeroIdx = 0;
        break;
      }
      if (OpNo != ZeroVecIdx && Byte == 0) {
        // If mask contains a zero, use it by placing that vector first.
        ZeroIdx = I + SystemZ::VectorBytes;
        MaskFirst = false;
        break;
      }
    }
    if (ZeroIdx != -1) {
      SDValue IndexNodes[SystemZ::VectorBytes];
      for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
        if (Bytes[I] >= 0) {
          unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes;
          unsigned Byte = unsigned(Bytes[I]) % SystemZ::VectorBytes;
          if (OpNo == ZeroVecIdx)
            IndexNodes[I] = DAG.getConstant(ZeroIdx, DL, MVT::i32);
          else {
            unsigned BIdx = MaskFirst ? Byte + SystemZ::VectorBytes : Byte;
            IndexNodes[I] = DAG.getConstant(BIdx, DL, MVT::i32);
          }
        } else
          IndexNodes[I] = DAG.getUNDEF(MVT::i32);
      }
      SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
      SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];
      if (MaskFirst)
        return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Mask, Src,
                           Mask);
      else
        return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Src, Mask,
                           Mask);
    }
  }

  SDValue IndexNodes[SystemZ::VectorBytes];
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
    if (Bytes[I] >= 0)
      IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
    else
      IndexNodes[I] = DAG.getUNDEF(MVT::i32);
  SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
  return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0],
                     (!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2);
}

namespace {
// Describes a general N-operand vector shuffle.
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}
  void addUndef();
  bool add(SDValue, unsigned);
  SDValue getNode(SelectionDAG &, const SDLoc &);
  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }
  SDValue insertUnpackIfPrepared(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Op);

  // The operands of the shuffle.
  SmallVector<SDValue, SystemZ::VectorBytes> Ops;

  // Index I is -1 if byte I of the result is undefined.  Otherwise the
  // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
  // Bytes[I] / SystemZ::VectorBytes.
  SmallVector<int, SystemZ::VectorBytes> Bytes;

  // The type of the shuffle result.
  EVT VT;

  // Holds a value of 1, 2 or 4 if a final unpack has been prepared for.
  unsigned UnpackFromEltSize;
};
} // end anonymous namespace

// Add an extra undefined element to the shuffle.
void GeneralShuffle::addUndef() {
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
}

// Add an extra element to the shuffle, taking it from element Elem of Op.
// A null Op indicates a vector input whose value will be calculated later;
// there is at most one such input per shuffle and it always has the same
// type as the result.  Aborts and returns false if the source vector elements
// of an EXTRACT_VECTOR_ELT are smaller than the destination elements.  Per
// LLVM they become implicitly extended, but this is rare and not optimized.
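// For example (illustrative): pulling i32 element 1 out of a source with
// i64 elements gives FromBytesPerElement == 8 and BytesPerElement == 4, so
// Byte == (1 * 8) % 16 + (8 - 4) == 12 -- the least significant four bytes
// of the second doubleword in this big-endian byte numbering.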
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();

  // The source vector can have wider elements than the result,
  // either through an explicit TRUNCATE or because of type legalization.
  // We want the least significant part.
  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
  unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();

  // Return false if the source elements are smaller than their destination
  // elements.
  if (FromBytesPerElement < BytesPerElement)
    return false;

  unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
                   (FromBytesPerElement - BytesPerElement));

  // Look through things like shuffles and bitcasts.
  while (Op.getNode()) {
    if (Op.getOpcode() == ISD::BITCAST)
      Op = Op.getOperand(0);
    else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
      // See whether the bytes we need come from a contiguous part of one
      // operand.
      SmallVector<int, SystemZ::VectorBytes> OpBytes;
      if (!getVPermMask(Op, OpBytes))
        break;
      int NewByte;
      if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
        break;
      if (NewByte < 0) {
        addUndef();
        return true;
      }
      Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
      Byte = unsigned(NewByte) % SystemZ::VectorBytes;
    } else if (Op.isUndef()) {
      addUndef();
      return true;
    } else
      break;
  }

  // Make sure that the source of the extraction is in Ops.
  unsigned OpNo = 0;
  for (; OpNo < Ops.size(); ++OpNo)
    if (Ops[OpNo] == Op)
      break;
  if (OpNo == Ops.size())
    Ops.push_back(Op);

  // Add the element to Bytes.
  unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(Base + I);

  return true;
}

// Return SDNodes for the completed shuffle.
SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
  assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");

  if (Ops.size() == 0)
    return DAG.getUNDEF(VT);

  // Use a single unpack if possible as the last operation.
  tryPrepareForUnpack();

  // Make sure that there are at least two shuffle operands.
  if (Ops.size() == 1)
    Ops.push_back(DAG.getUNDEF(MVT::v16i8));

  // Create a tree of shuffles, deferring root node until after the loop.
  // Try to redistribute the undefined elements of non-root nodes so that
  // the non-root shuffles match something like a pack or merge, then adjust
  // the parent node's permute vector to compensate for the new order.
  // Among other things, this copes with vectors like <2 x i16> that were
  // padded with undefined elements during type legalization.
  //
  // In the best case this redistribution will lead to the whole tree
  // using packs and merges.  It should rarely be a loss in other cases.
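  //
  // For example (illustrative): with four operands, the Stride == 1 pass
  // combines Ops[0] with Ops[1] and Ops[2] with Ops[3], leaving the two
  // intermediate results in Ops[0] and Ops[2]; the loop then exits and the
  // fix-up below moves Ops[2] into Ops[1] for the final two-operand permute.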
  unsigned Stride = 1;
  for (; Stride * 2 < Ops.size(); Stride *= 2) {
    for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
      SDValue SubOps[] = { Ops[I], Ops[I + Stride] };

      // Create a mask for just these two operands.
      SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
      for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
        unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
        unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
        if (OpNo == I)
          NewBytes[J] = Byte;
        else if (OpNo == I + Stride)
          NewBytes[J] = SystemZ::VectorBytes + Byte;
        else
          NewBytes[J] = -1;
      }
      // See if it would be better to reorganize NewBytes to avoid using VPERM.
      SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
      if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
        Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
        // Applying NewBytesMap to Ops[I] gets back to NewBytes.
        for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
          if (NewBytes[J] >= 0) {
            assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
                   "Invalid double permute");
            Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
          } else
            assert(NewBytesMap[J] < 0 && "Invalid double permute");
        }
      } else {
        // Just use NewBytes on the operands.
        Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
        for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
          if (NewBytes[J] >= 0)
            Bytes[J] = I * SystemZ::VectorBytes + J;
      }
    }
  }

  // Now we just have 2 inputs.  Put the second operand in Ops[1].
  if (Stride > 1) {
    Ops[1] = Ops[Stride];
    for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
      if (Bytes[I] >= int(SystemZ::VectorBytes))
        Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
  }

  // Look for an instruction that can do the permute without resorting
  // to VPERM.
  unsigned OpNo0, OpNo1;
  SDValue Op;
  if (unpackWasPrepared() && Ops[1].isUndef())
    Op = Ops[0];
  else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
    Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
  else
    Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);

  Op = insertUnpackIfPrepared(DAG, DL, Op);

  return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}

#ifndef NDEBUG
static void dumpBytes(const SmallVectorImpl<int> &Bytes, std::string Msg) {
  dbgs() << Msg.c_str() << " { ";
  for (unsigned i = 0; i < Bytes.size(); i++)
    dbgs() << Bytes[i] << " ";
  dbgs() << "}\n";
}
#endif

// If the Bytes vector matches an unpack operation, prepare to do the unpack
// after all else by removing the zero vector and the effect of the unpack on
// Bytes.
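// For example (illustrative): if every 2-byte element of Bytes selects a
// zero-vector byte in its high byte and a byte of some operand in its low
// byte, the final result is a zero extension from 1-byte elements.  The
// zero vector can then be dropped, Bytes rewritten to the narrower
// pre-unpack layout, and a single UNPACKL_HIGH appended at the very end.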
void GeneralShuffle::tryPrepareForUnpack() {
  uint32_t ZeroVecOpNo = findZeroVectorIdx(&Ops[0], Ops.size());
  if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)
    return;

  // Only do this if removing the zero vector reduces the depth, otherwise
  // the critical path will increase with the final unpack.
  if (Ops.size() > 2 &&
      Log2_32_Ceil(Ops.size()) == Log2_32_Ceil(Ops.size() - 1))
    return;

  // Find an unpack that would allow removing the zero vector from Ops.
  UnpackFromEltSize = 1;
  for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
    bool MatchUnpack = true;
    SmallVector<int, SystemZ::VectorBytes> SrcBytes;
    for (unsigned Elt = 0; Elt < SystemZ::VectorBytes; Elt++) {
      unsigned ToEltSize = UnpackFromEltSize * 2;
      bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
      if (!IsZextByte)
        SrcBytes.push_back(Bytes[Elt]);
      if (Bytes[Elt] != -1) {
        unsigned OpNo = unsigned(Bytes[Elt]) / SystemZ::VectorBytes;
        if (IsZextByte != (OpNo == ZeroVecOpNo)) {
          MatchUnpack = false;
          break;
        }
      }
    }
    if (MatchUnpack) {
      if (Ops.size() == 2) {
        // Don't use unpack if a single source operand needs rearrangement.
        for (unsigned i = 0; i < SystemZ::VectorBytes / 2; i++)
          if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) {
            UnpackFromEltSize = UINT_MAX;
            return;
          }
      }
      break;
    }
  }
  if (UnpackFromEltSize > 4)
    return;

  LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size "
             << UnpackFromEltSize << ". Zero vector is Op#" << ZeroVecOpNo
             << ".\n";
             dumpBytes(Bytes, "Original Bytes vector:"););

  // Apply the unpack in reverse to the Bytes array.
  unsigned B = 0;
  for (unsigned Elt = 0; Elt < SystemZ::VectorBytes;) {
    Elt += UnpackFromEltSize;
    for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++)
      Bytes[B] = Bytes[Elt];
  }
  while (B < SystemZ::VectorBytes)
    Bytes[B++] = -1;

  // Remove the zero vector from Ops.
  Ops.erase(&Ops[ZeroVecOpNo]);
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
    if (Bytes[I] >= 0) {
      unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes;
      if (OpNo > ZeroVecOpNo)
        Bytes[I] -= SystemZ::VectorBytes;
    }

  LLVM_DEBUG(dumpBytes(Bytes, "Resulting Bytes vector, zero vector removed:");
             dbgs() << "\n";);
}

SDValue GeneralShuffle::insertUnpackIfPrepared(SelectionDAG &DAG,
                                               const SDLoc &DL,
                                               SDValue Op) {
  if (!unpackWasPrepared())
    return Op;
  unsigned InBits = UnpackFromEltSize * 8;
  EVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBits),
                              SystemZ::VectorBits / InBits);
  SDValue PackedOp = DAG.getNode(ISD::BITCAST, DL, InVT, Op);
  unsigned OutBits = InBits * 2;
  EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(OutBits),
                               SystemZ::VectorBits / OutBits);
  return DAG.getNode(SystemZISD::UNPACKL_HIGH, DL, OutVT, PackedOp);
}

// Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
static bool isScalarToVector(SDValue Op) {
  for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
    if (!Op.getOperand(I).isUndef())
      return false;
  return true;
}

// Return a vector of type VT that contains Value in the first element.
// The other elements don't matter.
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SDValue Value) {
  // If we have a constant, replicate it to all elements and let the
  // BUILD_VECTOR lowering take care of it.
  if (Value.getOpcode() == ISD::Constant ||
      Value.getOpcode() == ISD::ConstantFP) {
    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
    return DAG.getBuildVector(VT, DL, Ops);
  }
  if (Value.isUndef())
    return DAG.getUNDEF(VT);
  return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
}

// Return a vector of type VT in which Op0 is in element 0 and Op1 is in
// element 1.  Used for cases in which replication is cheap.
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                 SDValue Op0, SDValue Op1) {
  if (Op0.isUndef()) {
    if (Op1.isUndef())
      return DAG.getUNDEF(VT);
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
  }
  if (Op1.isUndef())
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
  return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
                     buildScalarToVector(DAG, DL, VT, Op0),
                     buildScalarToVector(DAG, DL, VT, Op1));
}

// Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
// vector for them.
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
                          SDValue Op1) {
  if (Op0.isUndef() && Op1.isUndef())
    return DAG.getUNDEF(MVT::v2i64);
  // If one of the two inputs is undefined then replicate the other one,
  // in order to avoid using another register unnecessarily.
  if (Op0.isUndef())
    Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
  else if (Op1.isUndef())
    Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
  else {
    Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
    Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
  }
  return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
}

// If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
// better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
// the non-EXTRACT_VECTOR_ELT elements.  See if the given BUILD_VECTOR
// would benefit from this representation and return it if so.
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
                                     BuildVectorSDNode *BVN) {
  EVT VT = BVN->getValueType(0);
  unsigned NumElements = VT.getVectorNumElements();

  // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
  // on byte vectors.  If there are non-EXTRACT_VECTOR_ELT elements that still
  // need a BUILD_VECTOR, add an additional placeholder operand for that
  // BUILD_VECTOR and store its operands in ResidueOps.
  GeneralShuffle GS(VT);
  SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
  bool FoundOne = false;
  for (unsigned I = 0; I < NumElements; ++I) {
    SDValue Op = BVN->getOperand(I);
    if (Op.getOpcode() == ISD::TRUNCATE)
      Op = Op.getOperand(0);
    if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        Op.getOperand(1).getOpcode() == ISD::Constant) {
      unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      if (!GS.add(Op.getOperand(0), Elem))
        return SDValue();
      FoundOne = true;
    } else if (Op.isUndef()) {
      GS.addUndef();
    } else {
      if (!GS.add(SDValue(), ResidueOps.size()))
        return SDValue();
      ResidueOps.push_back(BVN->getOperand(I));
    }
  }

  // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
  if (!FoundOne)
    return SDValue();

  // Create the BUILD_VECTOR for the remaining elements, if any.
  if (!ResidueOps.empty()) {
    while (ResidueOps.size() < NumElements)
      ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
    for (auto &Op : GS.Ops) {
      if (!Op.getNode()) {
        Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
        break;
      }
    }
  }
  return GS.getNode(DAG, SDLoc(BVN));
}

bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
  if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
    return true;
  if (Subtarget.hasVectorEnhancements2() && Op.getOpcode() == SystemZISD::LRV)
    return true;
  return false;
}

// Combine GPR scalar values Elems into a vector of type VT.
SDValue
SystemZTargetLowering::buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SmallVectorImpl<SDValue> &Elems) const {
  // See whether there is a single replicated value.
  SDValue Single;
  unsigned int NumElements = Elems.size();
  unsigned int Count = 0;
  for (auto Elem : Elems) {
    if (!Elem.isUndef()) {
      if (!Single.getNode())
        Single = Elem;
      else if (Elem != Single) {
        Single = SDValue();
        break;
      }
      Count += 1;
    }
  }
  // There are three cases here:
  //
  // - if the only defined element is a loaded one, the best sequence
  //   is a replicating load.
  //
  // - otherwise, if the only defined element is an i64 value, we will
  //   end up with the same VLVGP sequence regardless of whether we short-cut
  //   for replication or fall through to the later code.
  //
  // - otherwise, if the only defined element is an i32 or smaller value,
  //   we would need 2 instructions to replicate it: VLVGP followed by VREPx.
  //   This is only a win if the single defined element is used more than once.
  //   In other cases we're better off using a single VLVGx.
  if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);

  // If all elements are loads, use VLREP/VLEs (below).
  bool AllLoads = true;
  for (auto Elem : Elems)
    if (!isVectorElementLoad(Elem)) {
      AllLoads = false;
      break;
    }

  // The best way of building a v2i64 from two i64s is to use VLVGP.
  if (VT == MVT::v2i64 && !AllLoads)
    return joinDwords(DAG, DL, Elems[0], Elems[1]);

  // Use a 64-bit merge high to combine two doubles.
  if (VT == MVT::v2f64 && !AllLoads)
    return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);

  // Build v4f32 values directly from the FPRs:
  //
  //   <Axxx> <Bxxx> <Cxxx> <Dxxx>
  //        V             V          VMRHF
  //     <ABxx>        <CDxx>
  //             V                   VMRHG
  //           <ABCD>
  if (VT == MVT::v4f32 && !AllLoads) {
    SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
    SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
    // Avoid unnecessary undefs by reusing the other operand.
    if (Op01.isUndef())
      Op01 = Op23;
    else if (Op23.isUndef())
      Op23 = Op01;
    // Merging identical replications is a no-op.
    if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
      return Op01;
    Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
    Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
    SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
                             DL, MVT::v2i64, Op01, Op23);
    return DAG.getNode(ISD::BITCAST, DL, VT, Op);
  }

  // Collect the constant terms.
  SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
  SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);

  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {
    SDValue Elem = Elems[I];
    if (Elem.getOpcode() == ISD::Constant ||
        Elem.getOpcode() == ISD::ConstantFP) {
      NumConstants += 1;
      Constants[I] = Elem;
      Done[I] = true;
    }
  }
  // If there was at least one constant, fill in the other elements of
  // Constants with undefs to get a full vector constant and use that
  // as the starting point.
  SDValue Result;
  SDValue ReplicatedVal;
  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)
      if (!Constants[I].getNode())
        Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
    Result = DAG.getBuildVector(VT, DL, Constants);
  } else {
    // Otherwise try to use VLREP or VLVGP to start the sequence in order to
    // avoid a false dependency on any previous contents of the vector
    // register.

    // Use a VLREP if at least one element is a load.  Make sure to replicate
    // the load with the most elements having its value.
    std::map<const SDNode*, unsigned> UseCounts;
    SDNode *LoadMaxUses = nullptr;
    for (unsigned I = 0; I < NumElements; ++I)
      if (isVectorElementLoad(Elems[I])) {
        SDNode *Ld = Elems[I].getNode();
        UseCounts[Ld]++;
        if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
          LoadMaxUses = Ld;
      }
    if (LoadMaxUses != nullptr) {
      ReplicatedVal = SDValue(LoadMaxUses, 0);
      Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal);
    } else {
      // Try to use VLVGP.
      unsigned I1 = NumElements / 2 - 1;
      unsigned I2 = NumElements - 1;
      bool Def1 = !Elems[I1].isUndef();
      bool Def2 = !Elems[I2].isUndef();
      if (Def1 || Def2) {
        SDValue Elem1 = Elems[Def1 ? I1 : I2];
        SDValue Elem2 = Elems[Def2 ? I2 : I1];
        Result = DAG.getNode(ISD::BITCAST, DL, VT,
                             joinDwords(DAG, DL, Elem1, Elem2));
        Done[I1] = true;
        Done[I2] = true;
      } else
        Result = DAG.getUNDEF(VT);
    }
  }

  // Use VLVGx to insert the other elements.
  for (unsigned I = 0; I < NumElements; ++I)
    if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
                           DAG.getConstant(I, DL, MVT::i32));
  return Result;
}

SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (BVN->isConstant()) {
    if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget))
      return Op;

    // Fall back to loading it from memory.
    return SDValue();
  }

  // See if we should use shuffles to construct the vector from other vectors.
  if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
    return Res;

  // Detect SCALAR_TO_VECTOR conversions.
  if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
    return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));

  // Otherwise use buildVector to build the vector up from GPRs.
  unsigned NumElements = Op.getNumOperands();
  SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
  for (unsigned I = 0; I < NumElements; ++I)
    Ops[I] = Op.getOperand(I);
  return buildVector(DAG, DL, VT, Ops);
}

SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
                                                   SelectionDAG &DAG) const {
  auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  unsigned NumElements = VT.getVectorNumElements();

  if (VSN->isSplat()) {
    SDValue Op0 = Op.getOperand(0);
    unsigned Index = VSN->getSplatIndex();
    assert(Index < VT.getVectorNumElements() &&
           "Splat index should be defined and in first operand");
    // See whether the value we're splatting is directly available as a scalar.
    if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
        Op0.getOpcode() == ISD::BUILD_VECTOR)
      return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
    // Otherwise keep it as a vector-to-vector operation.
    return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
                       DAG.getTargetConstant(Index, DL, MVT::i32));
  }

  GeneralShuffle GS(VT);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Elt = VSN->getMaskElt(I);
    if (Elt < 0)
      GS.addUndef();
    else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                     unsigned(Elt) % NumElements))
      return SDValue();
  }
  return GS.getNode(DAG, SDLoc(VSN));
}

SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // Just insert the scalar into element 0 of an undefined vector.
  return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
                     Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
                     Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
}

SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                      SelectionDAG &DAG) const {
  // Handle insertions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  EVT VT = Op.getValueType();

  // Insertions into constant indices of a v2f64 can be done using VPDI.
  // However, if the inserted value is a bitcast or a constant then it's
  // better to use GPRs, as below.
  if (VT == MVT::v2f64 &&
      Op1.getOpcode() != ISD::BITCAST &&
      Op1.getOpcode() != ISD::ConstantFP &&
      Op2.getOpcode() == ISD::Constant) {
    uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
    unsigned Mask = VT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and insert via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
                            DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}

SDValue
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {
  // Handle extractions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  EVT VecVT = Op0.getValueType();

  // Extractions of constant indices can be done directly.
  if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
    uint64_t Index = CIndexN->getZExtValue();
    unsigned Mask = VecVT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and extract via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}

SDValue SystemZTargetLowering::
lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const {
  SDValue PackedOp = Op.getOperand(0);
  EVT OutVT = Op.getValueType();
  EVT InVT = PackedOp.getValueType();
  unsigned ToBits = OutVT.getScalarSizeInBits();
  unsigned FromBits = InVT.getScalarSizeInBits();
  do {
    FromBits *= 2;
    EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
                                 SystemZ::VectorBits / FromBits);
    PackedOp =
      DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(PackedOp), OutVT, PackedOp);
  } while (FromBits != ToBits);
  return PackedOp;
}

// Lower a ZERO_EXTEND_VECTOR_INREG to a vector shuffle with a zero vector.
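// For example (illustrative): zero-extending v4i8 held in a v16i8 register
// to v4i32 builds the mask { 16, 17, 18, 0, 19, 20, 21, 1, 22, 23, 24, 2,
// 25, 26, 27, 3 }, i.e. three zero bytes followed by one source byte per
// i32 element in this big-endian layout.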
SDValue SystemZTargetLowering::
lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const {
  SDValue PackedOp = Op.getOperand(0);
  SDLoc DL(Op);
  EVT OutVT = Op.getValueType();
  EVT InVT = PackedOp.getValueType();
  unsigned InNumElts = InVT.getVectorNumElements();
  unsigned OutNumElts = OutVT.getVectorNumElements();
  unsigned NumInPerOut = InNumElts / OutNumElts;

  SDValue ZeroVec =
    DAG.getSplatVector(InVT, DL, DAG.getConstant(0, DL, InVT.getScalarType()));

  SmallVector<int, 16> Mask(InNumElts);
  unsigned ZeroVecElt = InNumElts;
  for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
    unsigned MaskElt = PackedElt * NumInPerOut;
    unsigned End = MaskElt + NumInPerOut - 1;
    for (; MaskElt < End; MaskElt++)
      Mask[MaskElt] = ZeroVecElt++;
    Mask[MaskElt] = PackedElt;
  }
  SDValue Shuf = DAG.getVectorShuffle(InVT, DL, PackedOp, ZeroVec, Mask);
  return DAG.getNode(ISD::BITCAST, DL, OutVT, Shuf);
}

SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
                                          unsigned ByScalar) const {
  // Look for cases where a vector shift can use the *_BY_SCALAR form.
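  // For example (illustrative): a v4i32 shift whose amount is the splat
  // constant 5 can use the by-scalar node with a single i32 shift amount
  // rather than a full vector of per-element amounts.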
5289 SDValue Op0 = Op.getOperand(0);
5290 SDValue Op1 = Op.getOperand(1);
5291 SDLoc DL(Op);
5292 EVT VT = Op.getValueType();
5293 unsigned ElemBitSize = VT.getScalarSizeInBits();
5294
5295 // See whether the shift vector is a splat represented as BUILD_VECTOR.
5296 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
5297 APInt SplatBits, SplatUndef;
5298 unsigned SplatBitSize;
5299 bool HasAnyUndefs;
5300 // Check for constant splats. Use ElemBitSize as the minimum element
5301 // width and reject splats that need wider elements.
5302 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
5303 ElemBitSize, true) &&
5304 SplatBitSize == ElemBitSize) {
5305 SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
5306 DL, MVT::i32);
5307 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
5308 }
5309 // Check for variable splats.
5310 BitVector UndefElements;
5311 SDValue Splat = BVN->getSplatValue(&UndefElements);
5312 if (Splat) {
5313 // Since i32 is the smallest legal type, we either need a no-op
5314 // or a truncation.
5315 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
5316 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
5317 }
5318 }
5319
5320 // See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
5321 // and the shift amount is directly available in a GPR.
5322 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
5323 if (VSN->isSplat()) {
5324 SDValue VSNOp0 = VSN->getOperand(0);
5325 unsigned Index = VSN->getSplatIndex();
5326 assert(Index < VT.getVectorNumElements() &&
5327 "Splat index should be defined and in first operand");
5328 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
5329 VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
5330 // Since i32 is the smallest legal type, we either need a no-op
5331 // or a truncation.
5332 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
5333 VSNOp0.getOperand(Index));
5334 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
5335 }
5336 }
5337 }
5338
5339 // Otherwise just treat the current form as legal.
5340 return Op;
5341 }
5342
LowerOperation(SDValue Op,SelectionDAG & DAG) const5343 SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
5344 SelectionDAG &DAG) const {
5345 switch (Op.getOpcode()) {
5346 case ISD::FRAMEADDR:
5347 return lowerFRAMEADDR(Op, DAG);
5348 case ISD::RETURNADDR:
5349 return lowerRETURNADDR(Op, DAG);
5350 case ISD::BR_CC:
5351 return lowerBR_CC(Op, DAG);
5352 case ISD::SELECT_CC:
5353 return lowerSELECT_CC(Op, DAG);
5354 case ISD::SETCC:
5355 return lowerSETCC(Op, DAG);
5356 case ISD::STRICT_FSETCC:
5357 return lowerSTRICT_FSETCC(Op, DAG, false);
5358 case ISD::STRICT_FSETCCS:
5359 return lowerSTRICT_FSETCC(Op, DAG, true);
5360 case ISD::GlobalAddress:
5361 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
5362 case ISD::GlobalTLSAddress:
5363 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
5364 case ISD::BlockAddress:
5365 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
5366 case ISD::JumpTable:
5367 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
5368 case ISD::ConstantPool:
5369 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
5370 case ISD::BITCAST:
5371 return lowerBITCAST(Op, DAG);
5372 case ISD::VASTART:
5373 return lowerVASTART(Op, DAG);
5374 case ISD::VACOPY:
5375 return lowerVACOPY(Op, DAG);
5376 case ISD::DYNAMIC_STACKALLOC:
5377 return lowerDYNAMIC_STACKALLOC(Op, DAG);
5378 case ISD::GET_DYNAMIC_AREA_OFFSET:
5379 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
5380 case ISD::SMUL_LOHI:
5381 return lowerSMUL_LOHI(Op, DAG);
5382 case ISD::UMUL_LOHI:
5383 return lowerUMUL_LOHI(Op, DAG);
5384 case ISD::SDIVREM:
5385 return lowerSDIVREM(Op, DAG);
5386 case ISD::UDIVREM:
5387 return lowerUDIVREM(Op, DAG);
5388 case ISD::SADDO:
5389 case ISD::SSUBO:
5390 case ISD::UADDO:
5391 case ISD::USUBO:
5392 return lowerXALUO(Op, DAG);
5393 case ISD::ADDCARRY:
5394 case ISD::SUBCARRY:
5395 return lowerADDSUBCARRY(Op, DAG);
5396 case ISD::OR:
5397 return lowerOR(Op, DAG);
5398 case ISD::CTPOP:
5399 return lowerCTPOP(Op, DAG);
5400 case ISD::ATOMIC_FENCE:
5401 return lowerATOMIC_FENCE(Op, DAG);
5402 case ISD::ATOMIC_SWAP:
5403 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
5404 case ISD::ATOMIC_STORE:
5405 return lowerATOMIC_STORE(Op, DAG);
5406 case ISD::ATOMIC_LOAD:
5407 return lowerATOMIC_LOAD(Op, DAG);
5408 case ISD::ATOMIC_LOAD_ADD:
5409 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
5410 case ISD::ATOMIC_LOAD_SUB:
5411 return lowerATOMIC_LOAD_SUB(Op, DAG);
5412 case ISD::ATOMIC_LOAD_AND:
5413 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
5414 case ISD::ATOMIC_LOAD_OR:
5415 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
5416 case ISD::ATOMIC_LOAD_XOR:
5417 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
5418 case ISD::ATOMIC_LOAD_NAND:
5419 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
5420 case ISD::ATOMIC_LOAD_MIN:
5421 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
5422 case ISD::ATOMIC_LOAD_MAX:
5423 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
5424 case ISD::ATOMIC_LOAD_UMIN:
5425 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
5426 case ISD::ATOMIC_LOAD_UMAX:
5427 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
5428 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
5429 return lowerATOMIC_CMP_SWAP(Op, DAG);
5430 case ISD::STACKSAVE:
5431 return lowerSTACKSAVE(Op, DAG);
5432 case ISD::STACKRESTORE:
5433 return lowerSTACKRESTORE(Op, DAG);
5434 case ISD::PREFETCH:
5435 return lowerPREFETCH(Op, DAG);
5436 case ISD::INTRINSIC_W_CHAIN:
5437 return lowerINTRINSIC_W_CHAIN(Op, DAG);
5438 case ISD::INTRINSIC_WO_CHAIN:
5439 return lowerINTRINSIC_WO_CHAIN(Op, DAG);
5440 case ISD::BUILD_VECTOR:
5441 return lowerBUILD_VECTOR(Op, DAG);
5442 case ISD::VECTOR_SHUFFLE:
5443 return lowerVECTOR_SHUFFLE(Op, DAG);
5444 case ISD::SCALAR_TO_VECTOR:
5445 return lowerSCALAR_TO_VECTOR(Op, DAG);
5446 case ISD::INSERT_VECTOR_ELT:
5447 return lowerINSERT_VECTOR_ELT(Op, DAG);
5448 case ISD::EXTRACT_VECTOR_ELT:
5449 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
5450 case ISD::SIGN_EXTEND_VECTOR_INREG:
5451 return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG);
5452 case ISD::ZERO_EXTEND_VECTOR_INREG:
5453 return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG);
5454 case ISD::SHL:
5455 return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
5456 case ISD::SRL:
5457 return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
5458 case ISD::SRA:
5459 return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
5460 default:
5461 llvm_unreachable("Unexpected node to lower");
5462 }
5463 }
5464
5465 // Lower operations with invalid operand or result types (currently used
5466 // only for 128-bit integer types).
5467
lowerI128ToGR128(SelectionDAG & DAG,SDValue In)5468 static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
5469 SDLoc DL(In);
5470 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
5471 DAG.getIntPtrConstant(0, DL));
5472 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
5473 DAG.getIntPtrConstant(1, DL));
5474 SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
5475 MVT::Untyped, Hi, Lo);
5476 return SDValue(Pair, 0);
5477 }
5478
lowerGR128ToI128(SelectionDAG & DAG,SDValue In)5479 static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {
5480 SDLoc DL(In);
5481 SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64,
5482 DL, MVT::i64, In);
5483 SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64,
5484 DL, MVT::i64, In);
5485 return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi);
5486 }
5487
5488 void
LowerOperationWrapper(SDNode * N,SmallVectorImpl<SDValue> & Results,SelectionDAG & DAG) const5489 SystemZTargetLowering::LowerOperationWrapper(SDNode *N,
5490 SmallVectorImpl<SDValue> &Results,
5491 SelectionDAG &DAG) const {
5492 switch (N->getOpcode()) {
5493 case ISD::ATOMIC_LOAD: {
5494 SDLoc DL(N);
5495 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other);
5496 SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
5497 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
5498 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128,
5499 DL, Tys, Ops, MVT::i128, MMO);
5500 Results.push_back(lowerGR128ToI128(DAG, Res));
5501 Results.push_back(Res.getValue(1));
5502 break;
5503 }
5504 case ISD::ATOMIC_STORE: {
5505 SDLoc DL(N);
5506 SDVTList Tys = DAG.getVTList(MVT::Other);
5507 SDValue Ops[] = { N->getOperand(0),
5508 lowerI128ToGR128(DAG, N->getOperand(2)),
5509 N->getOperand(1) };
5510 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
5511 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128,
5512 DL, Tys, Ops, MVT::i128, MMO);
5513 // We have to enforce sequential consistency by performing a
5514 // serialization operation after the store.
5515 if (cast<AtomicSDNode>(N)->getOrdering() ==
5516 AtomicOrdering::SequentiallyConsistent)
5517 Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL,
5518 MVT::Other, Res), 0);
5519 Results.push_back(Res);
5520 break;
5521 }
5522 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
5523 SDLoc DL(N);
5524 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other);
5525 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
5526 lowerI128ToGR128(DAG, N->getOperand(2)),
5527 lowerI128ToGR128(DAG, N->getOperand(3)) };
5528 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
5529 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128,
5530 DL, Tys, Ops, MVT::i128, MMO);
    SDValue Success = emitSETCC(DAG, DL, Res.getValue(1),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
    Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1));
    Results.push_back(lowerGR128ToI128(DAG, Res));
    Results.push_back(Success);
    Results.push_back(Res.getValue(2));
    break;
  }
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}

void
SystemZTargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}

const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
  switch ((SystemZISD::NodeType)Opcode) {
    case SystemZISD::FIRST_NUMBER: break;
    OPCODE(RET_FLAG);
    OPCODE(CALL);
    OPCODE(SIBCALL);
    OPCODE(TLS_GDCALL);
    OPCODE(TLS_LDCALL);
    OPCODE(PCREL_WRAPPER);
    OPCODE(PCREL_OFFSET);
    OPCODE(IABS);
    OPCODE(ICMP);
    OPCODE(FCMP);
    OPCODE(STRICT_FCMP);
    OPCODE(STRICT_FCMPS);
    OPCODE(TM);
    OPCODE(BR_CCMASK);
    OPCODE(SELECT_CCMASK);
    OPCODE(ADJDYNALLOC);
    OPCODE(PROBED_ALLOCA);
    OPCODE(POPCNT);
    OPCODE(SMUL_LOHI);
    OPCODE(UMUL_LOHI);
    OPCODE(SDIVREM);
    OPCODE(UDIVREM);
    OPCODE(SADDO);
    OPCODE(SSUBO);
    OPCODE(UADDO);
    OPCODE(USUBO);
    OPCODE(ADDCARRY);
    OPCODE(SUBCARRY);
    OPCODE(GET_CCMASK);
    OPCODE(MVC);
    OPCODE(MVC_LOOP);
    OPCODE(NC);
    OPCODE(NC_LOOP);
    OPCODE(OC);
    OPCODE(OC_LOOP);
    OPCODE(XC);
    OPCODE(XC_LOOP);
    OPCODE(CLC);
    OPCODE(CLC_LOOP);
    OPCODE(STPCPY);
    OPCODE(STRCMP);
    OPCODE(SEARCH_STRING);
    OPCODE(IPM);
    OPCODE(MEMBARRIER);
    OPCODE(TBEGIN);
    OPCODE(TBEGIN_NOFLOAT);
    OPCODE(TEND);
    OPCODE(BYTE_MASK);
    OPCODE(ROTATE_MASK);
    OPCODE(REPLICATE);
    OPCODE(JOIN_DWORDS);
    OPCODE(SPLAT);
    OPCODE(MERGE_HIGH);
    OPCODE(MERGE_LOW);
    OPCODE(SHL_DOUBLE);
    OPCODE(PERMUTE_DWORDS);
    OPCODE(PERMUTE);
    OPCODE(PACK);
    OPCODE(PACKS_CC);
    OPCODE(PACKLS_CC);
    OPCODE(UNPACK_HIGH);
    OPCODE(UNPACKL_HIGH);
    OPCODE(UNPACK_LOW);
    OPCODE(UNPACKL_LOW);
    OPCODE(VSHL_BY_SCALAR);
    OPCODE(VSRL_BY_SCALAR);
    OPCODE(VSRA_BY_SCALAR);
    OPCODE(VSUM);
    OPCODE(VICMPE);
    OPCODE(VICMPH);
    OPCODE(VICMPHL);
    OPCODE(VICMPES);
    OPCODE(VICMPHS);
    OPCODE(VICMPHLS);
    OPCODE(VFCMPE);
    OPCODE(STRICT_VFCMPE);
    OPCODE(STRICT_VFCMPES);
    OPCODE(VFCMPH);
    OPCODE(STRICT_VFCMPH);
    OPCODE(STRICT_VFCMPHS);
    OPCODE(VFCMPHE);
    OPCODE(STRICT_VFCMPHE);
    OPCODE(STRICT_VFCMPHES);
    OPCODE(VFCMPES);
    OPCODE(VFCMPHS);
    OPCODE(VFCMPHES);
    OPCODE(VFTCI);
    OPCODE(VEXTEND);
    OPCODE(STRICT_VEXTEND);
    OPCODE(VROUND);
    OPCODE(STRICT_VROUND);
    OPCODE(VTM);
    OPCODE(VFAE_CC);
    OPCODE(VFAEZ_CC);
    OPCODE(VFEE_CC);
    OPCODE(VFEEZ_CC);
    OPCODE(VFENE_CC);
    OPCODE(VFENEZ_CC);
    OPCODE(VISTR_CC);
    OPCODE(VSTRC_CC);
    OPCODE(VSTRCZ_CC);
    OPCODE(VSTRS_CC);
    OPCODE(VSTRSZ_CC);
    OPCODE(TDC);
    OPCODE(ATOMIC_SWAPW);
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_OR);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(ATOMIC_CMP_SWAP);
    OPCODE(ATOMIC_LOAD_128);
    OPCODE(ATOMIC_STORE_128);
    OPCODE(ATOMIC_CMP_SWAP_128);
    OPCODE(LRV);
    OPCODE(STRV);
    OPCODE(VLER);
    OPCODE(VSTER);
    OPCODE(PREFETCH);
  }
  return nullptr;
#undef OPCODE
}

// Return true if VT is a vector whose elements are a whole number of bytes
// in width. Also check for presence of vector support.
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())
    return false;

  return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
}

// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
// producing a result of type ResVT. Op is a possibly bitcast version
// of the input vector and Index is the index (based on type VecVT) that
// should be extracted. Return the new extraction if a simplification
// was possible or if Force is true.
SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
                                              EVT VecVT, SDValue Op,
                                              unsigned Index,
                                              DAGCombinerInfo &DCI,
                                              bool Force) const {
  SelectionDAG &DAG = DCI.DAG;

  // The number of bytes being extracted.
  unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
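  // For example, extracting i16 element 3 from a value viewed as v8i16
  // covers bytes 6 and 7 of the 16-byte vector register.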

  for (;;) {
    unsigned Opcode = Op.getOpcode();
    if (Opcode == ISD::BITCAST)
      // Look through bitcasts.
      Op = Op.getOperand(0);
    else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) &&
             canTreatAsByteVector(Op.getValueType())) {
      // Get a VPERM-like permute mask and see whether the bytes covered
      // by the extracted element are a contiguous sequence from one
      // source operand.
      SmallVector<int, SystemZ::VectorBytes> Bytes;
      if (!getVPermMask(Op, Bytes))
        break;
      int First;
      if (!getShuffleInput(Bytes, Index * BytesPerElement,
                           BytesPerElement, First))
        break;
      if (First < 0)
        return DAG.getUNDEF(ResVT);
      // Make sure the contiguous sequence starts at a multiple of the
      // original element size.
      unsigned Byte = unsigned(First) % Bytes.size();
      if (Byte % BytesPerElement != 0)
        break;
      // We can get the extracted value directly from an input.
      Index = Byte / BytesPerElement;
      Op = Op.getOperand(unsigned(First) / Bytes.size());
      Force = true;
    } else if (Opcode == ISD::BUILD_VECTOR &&
               canTreatAsByteVector(Op.getValueType())) {
      // We can only optimize this case if the BUILD_VECTOR elements are
      // at least as wide as the extracted value.
      EVT OpVT = Op.getValueType();
      unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
      if (OpBytesPerElement < BytesPerElement)
        break;
      // Make sure that the least-significant bit of the extracted value
      // is the least-significant bit of an input.
      unsigned End = (Index + 1) * BytesPerElement;
      if (End % OpBytesPerElement != 0)
        break;
      // We're extracting the low part of one operand of the BUILD_VECTOR.
      Op = Op.getOperand(End / OpBytesPerElement - 1);
      if (!Op.getValueType().isInteger()) {
        EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
        Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
        DCI.AddToWorklist(Op.getNode());
      }
      EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
      Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
      if (VT != ResVT) {
        DCI.AddToWorklist(Op.getNode());
        Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
      }
      return Op;
    } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
                Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
                Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
               canTreatAsByteVector(Op.getValueType()) &&
               canTreatAsByteVector(Op.getOperand(0).getValueType())) {
      // Make sure that only the unextended bits are significant.
      EVT ExtVT = Op.getValueType();
      EVT OpVT = Op.getOperand(0).getValueType();
      unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
      unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
      unsigned Byte = Index * BytesPerElement;
      unsigned SubByte = Byte % ExtBytesPerElement;
      unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
      if (SubByte < MinSubByte ||
          SubByte + BytesPerElement > ExtBytesPerElement)
        break;
      // Get the byte offset of the unextended element...
      Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
      // ...then add the byte offset relative to that element.
      Byte += SubByte - MinSubByte;
      if (Byte % BytesPerElement != 0)
        break;
      Op = Op.getOperand(0);
      Index = Byte / BytesPerElement;
      Force = true;
    } else
      break;
  }
  if (Force) {
    if (Op.getValueType() != VecVT) {
      Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
      DCI.AddToWorklist(Op.getNode());
    }
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
                       DAG.getConstant(Index, DL, MVT::i32));
  }
  return SDValue();
}

// Optimize vector operations in scalar value Op on the basis that Op
// is truncated to TruncVT.
SDValue SystemZTargetLowering::combineTruncateExtract(
    const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
  // If we have (trunc (extract_vector_elt X, Y)), try to turn it into
  // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
  // of type TruncVT.
  if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      TruncVT.getSizeInBits() % 8 == 0) {
    SDValue Vec = Op.getOperand(0);
    EVT VecVT = Vec.getValueType();
    if (canTreatAsByteVector(VecVT)) {
      if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
        unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
        unsigned TruncBytes = TruncVT.getStoreSize();
        if (BytesPerElement % TruncBytes == 0) {
          // Calculate the value of Y' in the above description. We are
          // splitting the original elements into Scale equal-sized pieces
          // and for truncation purposes want the last (least-significant)
          // of these pieces for IndexN. This is easiest to do by calculating
          // the start index of the following element and then subtracting 1.
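          // For example, truncating an i32 element Y of a v4i32 to i8 gives
          // Scale = 4, so the wanted piece is element 4 * Y + 3 of the
          // v16i8 view of the vector.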
          unsigned Scale = BytesPerElement / TruncBytes;
          unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;

          // Defer the creation of the bitcast from X to combineExtract,
          // which might be able to optimize the extraction.
          VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
                                   VecVT.getStoreSize() / TruncBytes);
          EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
          return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
        }
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) {
    auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (TrueOp && FalseOp) {
      SDLoc DL(N0);
      SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT),
                        DAG.getConstant(FalseOp->getZExtValue(), DL, VT),
                        N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) };
      SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops);
      // If N0 has multiple uses, change other uses as well.
      if (!N0.hasOneUse()) {
        SDValue TruncSelect =
          DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect);
        DCI.CombineTo(N0.getNode(), TruncSelect);
      }
      return NewSelect;
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (sext_in_reg (setcc LHS, RHS, COND), i1)
  // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1)
  // into (select_cc LHS, RHS, -1, 0, COND)
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
  if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND)
    N0 = N0.getOperand(0);
  if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) {
    SDLoc DL(N0);
    SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1),
                      DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT),
                      N0.getOperand(2) };
    return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (sext (ashr (shl X, C1), C2)) to
  // (ashr (shl (anyext X), C1'), C2'), since wider shifts are as
  // cheap as narrower ones.
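  // For example, (sext_i64 (ashr (shl X:i32, C1), C2)) becomes
  // (ashr (shl (anyext X), C1 + 32), C2 + 32); Extra below is 32 in
  // that case.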
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
    auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    SDValue Inner = N0.getOperand(0);
    if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
      if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
        unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
        unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
        unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
        EVT ShiftVT = N0.getOperand(1).getValueType();
        SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
                                  Inner.getOperand(0));
        SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
                                  DAG.getConstant(NewShlAmt, SDLoc(Inner),
                                                  ShiftVT));
        return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
                           DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opcode = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
    // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF
    // for v4f32.
    if (Op1 == N->getOperand(0))
      return Op1;
    // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
    EVT VT = Op1.getValueType();
    unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
    if (ElemBytes <= 4) {
      Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
                SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
      EVT InVT = VT.changeVectorElementTypeToInteger();
      EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
                                   SystemZ::VectorBytes / ElemBytes / 2);
      if (VT != InVT) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
      DCI.AddToWorklist(Op.getNode());
      return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT LdVT = N->getValueType(0);
  if (LdVT.isVector() || LdVT.isInteger())
    return SDValue();
  // Transform a scalar load that is REPLICATEd as well as having other
  // use(s) to the form where the other use(s) use the first element of the
  // REPLICATE instead of the load. Otherwise instruction selection will not
  // produce a VLREP. To avoid extracting to a GPR, only do this for
  // floating-point loads.

  SDValue Replicate;
  SmallVector<SDNode*, 8> OtherUses;
  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() == SystemZISD::REPLICATE) {
      if (Replicate)
        return SDValue(); // Should never happen
      Replicate = SDValue(*UI, 0);
    }
    else if (UI.getUse().getResNo() == 0)
      OtherUses.push_back(*UI);
  }
  if (!Replicate || OtherUses.empty())
    return SDValue();

  SDLoc DL(N);
  SDValue Extract0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, LdVT,
                                 Replicate, DAG.getConstant(0, DL, MVT::i32));
  // Update uses of the loaded value while preserving old chains.
  for (SDNode *U : OtherUses) {
    SmallVector<SDValue, 8> Ops;
    for (SDValue Op : U->ops())
      Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op);
    DAG.UpdateNodeOperands(U, Ops);
  }
  return SDValue(N, 0);
}

bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
    return true;
  if (Subtarget.hasVectorEnhancements2())
    if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64)
      return true;
  return false;
}

static bool isVectorElementSwap(ArrayRef<int> M, EVT VT) {
  if (!VT.isVector() || !VT.isSimple() ||
      VT.getSizeInBits() != 128 ||
      VT.getScalarSizeInBits() % 8 != 0)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != NumElts - 1 - i)
      return false;
  }

  return true;
}

SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();
  // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
  // for the extraction to be done on a vMiN value, so that we can use VSTE.
  // If X has wider elements then convert it to:
  // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
  if (MemVT.isInteger() && SN->isTruncatingStore()) {
    if (SDValue Value =
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());

      // Rewrite the store with the new form of stored value.
      return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
                               SN->getBasePtr(), SN->getMemoryVT(),
                               SN->getMemOperand());
    }
  }
  // Combine STORE (BSWAP) into STRVH/STRV/STRVG/VSTBR
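  // (The byte-swapping store instructions reverse the bytes themselves, so
  // the explicit BSWAP node becomes redundant and is folded away.)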
  if (!SN->isTruncatingStore() &&
      Op1.getOpcode() == ISD::BSWAP &&
      Op1.getNode()->hasOneUse() &&
      canLoadStoreByteSwapped(Op1.getValueType())) {

    SDValue BSwapOp = Op1.getOperand(0);

    if (BSwapOp.getValueType() == MVT::i16)
      BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);

    SDValue Ops[] = {
      N->getOperand(0), BSwapOp, N->getOperand(2)
    };

    return DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N),
                                   DAG.getVTList(MVT::Other),
                                   Ops, MemVT, SN->getMemOperand());
  }
  // Combine STORE (element-swap) into VSTER
  if (!SN->isTruncatingStore() &&
      Op1.getOpcode() == ISD::VECTOR_SHUFFLE &&
      Op1.getNode()->hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
    ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op1.getNode());
    ArrayRef<int> ShuffleMask = SVN->getMask();
    if (isVectorElementSwap(ShuffleMask, Op1.getValueType())) {
      SDValue Ops[] = {
        N->getOperand(0), Op1.getOperand(0), N->getOperand(2)
      };

      return DAG.getMemIntrinsicNode(SystemZISD::VSTER, SDLoc(N),
                                     DAG.getVTList(MVT::Other),
                                     Ops, MemVT, SN->getMemOperand());
    }
  }

  return SDValue();
}

SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine element-swap (LOAD) into VLER
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
    ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    ArrayRef<int> ShuffleMask = SVN->getMask();
    if (isVectorElementSwap(ShuffleMask, N->getValueType(0))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);

      // Create the element-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr()   // Ptr
      };
      SDValue ESLoad =
        DAG.getMemIntrinsicNode(SystemZISD::VLER, SDLoc(N),
                                DAG.getVTList(LD->getValueType(0), MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // First, combine the VECTOR_SHUFFLE away. This makes the value produced
      // by the load dead.
      DCI.CombineTo(N, ESLoad);

      // Next, combine the load away: we give it a bogus result value but a
      // real chain result. The result value is dead because the shuffle is
      // dead.
      DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  if (!Subtarget.hasVector())
    return SDValue();

  // Look through bitcasts that retain the number of vector elements.
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
        Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

  // Pull BSWAP out of a vector extraction.
  if (Op.getOpcode() == ISD::BSWAP && Op.hasOneUse()) {
    EVT VecVT = Op.getValueType();
    EVT EltVT = VecVT.getVectorElementType();
    Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), EltVT,
                     Op.getOperand(0), N->getOperand(1));
    DCI.AddToWorklist(Op.getNode());
    Op = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Op);
    if (EltVT != N->getValueType(0)) {
      DCI.AddToWorklist(Op.getNode());
      Op = DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Op);
    }
    return Op;
  }

  // Try to simplify a vector extraction.
  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
    SDValue Op0 = N->getOperand(0);
    EVT VecVT = Op0.getValueType();
    return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                          IndexN->getZExtValue(), DCI, false);
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // (join_dwords X, X) == (replicate X)
  if (N->getOperand(0) == N->getOperand(1))
    return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
                       N->getOperand(0));
  return SDValue();
}

static SDValue MergeInputChains(SDNode *N1, SDNode *N2) {
  SDValue Chain1 = N1->getOperand(0);
  SDValue Chain2 = N2->getOperand(0);

  // Trivial case: both nodes take the same chain.
  if (Chain1 == Chain2)
    return Chain1;

  // FIXME - we could handle more complex cases via TokenFactor,
  // assuming we can verify that this would not create a cycle.
  return SDValue();
}

SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())
    return SDValue();

  // (fpround (extract_vector_elt X 0))
  // (fpround (extract_vector_elt X 1)) ->
  // (extract_vector_elt (VROUND X) 0)
  // (extract_vector_elt (VROUND X) 2)
  //
  // This is a special case since the target doesn't really support v2f32s.
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(OpNo);
  if (N->getValueType(0) == MVT::f32 &&
      Op0.hasOneUse() &&
      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Op0.getOperand(0).getValueType() == MVT::v2f64 &&
      Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
    SDValue Vec = Op0.getOperand(0);
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() &&
          U->hasOneUse() &&
          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          U->getOperand(0) == Vec &&
          U->getOperand(1).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
        SDValue OtherRound = SDValue(*U->use_begin(), 0);
        if (OtherRound.getOpcode() == N->getOpcode() &&
            OtherRound.getOperand(OpNo) == SDValue(U, 0) &&
            OtherRound.getValueType() == MVT::f32) {
          SDValue VRound, Chain;
          if (N->isStrictFPOpcode()) {
            Chain = MergeInputChains(N, OtherRound.getNode());
            if (!Chain)
              continue;
            VRound = DAG.getNode(SystemZISD::STRICT_VROUND, SDLoc(N),
                                 {MVT::v4f32, MVT::Other}, {Chain, Vec});
            Chain = VRound.getValue(1);
          } else
            VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
                                 MVT::v4f32, Vec);
          DCI.AddToWorklist(VRound.getNode());
          SDValue Extract1 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
                        VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
          DCI.AddToWorklist(Extract1.getNode());
          DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
          if (Chain)
            DAG.ReplaceAllUsesOfValueWith(OtherRound.getValue(1), Chain);
          SDValue Extract0 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
                        VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
          if (Chain)
            return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op0),
                               N->getVTList(), Extract0, Chain);
          return Extract0;
        }
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())
    return SDValue();

  // (fpextend (extract_vector_elt X 0))
  // (fpextend (extract_vector_elt X 2)) ->
  // (extract_vector_elt (VEXTEND X) 0)
  // (extract_vector_elt (VEXTEND X) 1)
  //
  // This is a special case since the target doesn't really support v2f32s.
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(OpNo);
  if (N->getValueType(0) == MVT::f64 &&
      Op0.hasOneUse() &&
      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Op0.getOperand(0).getValueType() == MVT::v4f32 &&
      Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
    SDValue Vec = Op0.getOperand(0);
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() &&
          U->hasOneUse() &&
          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          U->getOperand(0) == Vec &&
          U->getOperand(1).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
        SDValue OtherExtend = SDValue(*U->use_begin(), 0);
        if (OtherExtend.getOpcode() == N->getOpcode() &&
            OtherExtend.getOperand(OpNo) == SDValue(U, 0) &&
            OtherExtend.getValueType() == MVT::f64) {
          SDValue VExtend, Chain;
          if (N->isStrictFPOpcode()) {
            Chain = MergeInputChains(N, OtherExtend.getNode());
            if (!Chain)
              continue;
            VExtend = DAG.getNode(SystemZISD::STRICT_VEXTEND, SDLoc(N),
                                  {MVT::v2f64, MVT::Other}, {Chain, Vec});
            Chain = VExtend.getValue(1);
          } else
            VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N),
                                  MVT::v2f64, Vec);
          DCI.AddToWorklist(VExtend.getNode());
          SDValue Extract1 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64,
                        VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32));
          DCI.AddToWorklist(Extract1.getNode());
          DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1);
          if (Chain)
            DAG.ReplaceAllUsesOfValueWith(OtherExtend.getValue(1), Chain);
          SDValue Extract0 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64,
                        VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
          if (Chain)
            return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op0),
                               N->getVTList(), Extract0, Chain);
          return Extract0;
        }
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineINT_TO_FP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (DCI.Level != BeforeLegalizeTypes)
    return SDValue();
  unsigned Opcode = N->getOpcode();
  EVT OutVT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op = N->getOperand(0);
  unsigned OutScalarBits = OutVT.getScalarSizeInBits();
  unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits();

  // Insert an extension before type-legalization to avoid scalarization, e.g.:
  // v2f64 = uint_to_fp v2i16
  // =>
  // v2f64 = uint_to_fp (v2i64 zero_extend v2i16)
  if (OutVT.isVector() && OutScalarBits > InScalarBits) {
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(OutVT.getScalarSizeInBits()),
                                 OutVT.getVectorNumElements());
    unsigned ExtOpcode =
      (Opcode == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND);
    SDValue ExtOp = DAG.getNode(ExtOpcode, SDLoc(N), ExtVT, Op);
    return DAG.getNode(Opcode, SDLoc(N), OutVT, ExtOp);
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine BSWAP (LOAD) into LRVH/LRV/LRVG/VLBR
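  // (The byte-reversing load instructions swap the bytes themselves, so the
  // explicit BSWAP node becomes redundant and is folded away.)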
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {
    SDValue Load = N->getOperand(0);
    LoadSDNode *LD = cast<LoadSDNode>(Load);

    // Create the byte-swapping load.
    SDValue Ops[] = {
      LD->getChain(),    // Chain
      LD->getBasePtr()   // Ptr
    };
    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
      LoadVT = MVT::i32;
    SDValue BSLoad =
      DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
                              DAG.getVTList(LoadVT, MVT::Other),
                              Ops, LD->getMemoryVT(), LD->getMemOperand());

    // If this is an i16 load, insert the truncate.
    SDValue ResVal = BSLoad;
    if (N->getValueType(0) == MVT::i16)
      ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);

    // First, combine the bswap away. This makes the value produced by the
    // load dead.
    DCI.CombineTo(N, ResVal);

    // Next, combine the load away: we give it a bogus result value but a real
    // chain result. The result value is dead because the bswap is dead.
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

    // Return N so it doesn't get rechecked!
    return SDValue(N, 0);
  }

  // Look through bitcasts that retain the number of vector elements.
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
        Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

  // Push BSWAP into a vector insertion if at least one side then simplifies.
  if (Op.getOpcode() == ISD::INSERT_VECTOR_ELT && Op.hasOneUse()) {
    SDValue Vec = Op.getOperand(0);
    SDValue Elt = Op.getOperand(1);
    SDValue Idx = Op.getOperand(2);

    if (DAG.isConstantIntBuildVectorOrConstantInt(Vec) ||
        Vec.getOpcode() == ISD::BSWAP || Vec.isUndef() ||
        DAG.isConstantIntBuildVectorOrConstantInt(Elt) ||
        Elt.getOpcode() == ISD::BSWAP || Elt.isUndef() ||
        (canLoadStoreByteSwapped(N->getValueType(0)) &&
         ISD::isNON_EXTLoad(Elt.getNode()) && Elt.hasOneUse())) {
      EVT VecVT = N->getValueType(0);
      EVT EltVT = N->getValueType(0).getVectorElementType();
      if (VecVT != Vec.getValueType()) {
        Vec = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Vec);
        DCI.AddToWorklist(Vec.getNode());
      }
      if (EltVT != Elt.getValueType()) {
        Elt = DAG.getNode(ISD::BITCAST, SDLoc(N), EltVT, Elt);
        DCI.AddToWorklist(Elt.getNode());
      }
      Vec = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Vec);
      DCI.AddToWorklist(Vec.getNode());
      Elt = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Elt);
      DCI.AddToWorklist(Elt.getNode());
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VecVT,
                         Vec, Elt, Idx);
    }
  }

  // Push BSWAP into a vector shuffle if at least one side then simplifies.
  ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(Op);
  if (SV && Op.hasOneUse()) {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
        Op0.getOpcode() == ISD::BSWAP || Op0.isUndef() ||
        DAG.isConstantIntBuildVectorOrConstantInt(Op1) ||
        Op1.getOpcode() == ISD::BSWAP || Op1.isUndef()) {
      EVT VecVT = N->getValueType(0);
      if (VecVT != Op0.getValueType()) {
        Op0 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op0);
        DCI.AddToWorklist(Op0.getNode());
      }
      if (VecVT != Op1.getValueType()) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      Op0 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op0);
      DCI.AddToWorklist(Op0.getNode());
      Op1 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op1);
      DCI.AddToWorklist(Op1.getNode());
      return DAG.getVectorShuffle(VecVT, SDLoc(N), Op0, Op1, SV->getMask());
    }
  }

  return SDValue();
}

static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
  // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
  // set by the CCReg instruction using the CCValid / CCMask masks.
  // If the CCReg instruction is itself an ICMP testing the condition
  // code set by some other instruction, see whether we can directly
  // use that condition code.
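  //
  // For example, in
  //   (br_ccmask (icmp (select_ccmask 1, 0, Valid, Mask), 0), CCMASK_CMP_NE)
  // the branch is taken exactly when the Mask condition of the inner select
  // holds, so the branch can test (Valid, Mask) directly and the ICMP and
  // SELECT_CCMASK become dead.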

  // Verify that we have an ICMP against some constant.
  if (CCValid != SystemZ::CCMASK_ICMP)
    return false;
  auto *ICmp = CCReg.getNode();
  if (ICmp->getOpcode() != SystemZISD::ICMP)
    return false;
  auto *CompareLHS = ICmp->getOperand(0).getNode();
  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
  if (!CompareRHS)
    return false;

  // Optimize the case where CompareLHS is a SELECT_CCMASK.
  if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
    // Verify that we have an appropriate mask for an EQ or NE comparison.
    bool Invert = false;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      Invert = !Invert;
    else if (CCMask != SystemZ::CCMASK_CMP_EQ)
      return false;

    // Verify that the ICMP compares against one of the select values.
    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
    if (!TrueVal)
      return false;
    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!FalseVal)
      return false;
    if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
      Invert = !Invert;
    else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
      return false;

    // Compute the effective CC mask for the new branch or select.
    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
    if (!NewCCValid || !NewCCMask)
      return false;
    CCValid = NewCCValid->getZExtValue();
    CCMask = NewCCMask->getZExtValue();
    if (Invert)
      CCMask ^= CCValid;

    // Return the updated CCReg link.
    CCReg = CompareLHS->getOperand(4);
    return true;
  }

  // Optimize the case where CompareLHS is (SRA (SHL (IPM))).
  if (CompareLHS->getOpcode() == ISD::SRA) {
    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!SRACount || SRACount->getZExtValue() != 30)
      return false;
    auto *SHL = CompareLHS->getOperand(0).getNode();
    if (SHL->getOpcode() != ISD::SHL)
      return false;
    auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
    if (!SHLCount || SHLCount->getZExtValue() != 30 - SystemZ::IPM_CC)
      return false;
    auto *IPM = SHL->getOperand(0).getNode();
    if (IPM->getOpcode() != SystemZISD::IPM)
      return false;

    // Avoid introducing CC spills (because SRA would clobber CC).
    if (!CompareLHS->hasOneUse())
      return false;
    // Verify that the ICMP compares against zero.
    if (CompareRHS->getZExtValue() != 0)
      return false;

    // Compute the effective CC mask for the new branch or select.
    CCMask = SystemZ::reverseCCMask(CCMask);

    // Return the updated CCReg link.
    CCReg = IPM->getOperand(0);
    return true;
  }

  return false;
}

SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue Chain = N->getOperand(0);
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
                       Chain,
                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       N->getOperand(3), CCReg);
  return SDValue();
}

SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                       N->getOperand(0), N->getOperand(1),
                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       CCReg);
  return SDValue();
}

SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {

  // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();

  SDValue Select = N->getOperand(0);
  if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
    return SDValue();

  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
    return SDValue();
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();

  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
    return SDValue();
  if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0)
    ;
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0)
    SelectCCMaskVal ^= SelectCCValidVal;
  else
    return SDValue();

  if (SelectCCValidVal & ~CCValidVal)
    return SDValue();
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
    return SDValue();

  return Select->getOperand(4);
}

SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  // In the case where the divisor is a vector of constants, a cheaper
  // sequence of instructions can replace the divide. BuildSDIV is called to
  // do this during DAG combining, but it only succeeds when it can build a
  // multiplication node. The only option for SystemZ is ISD::SMUL_LOHI, and
  // since it is not Legal but Custom it can only happen before
  // legalization. Therefore we must scalarize this early, before the first
  // DAG combine ("Combine 1"). For widened vectors, this is already the
  // result of type legalization.
  if (DCI.Level == BeforeLegalizeTypes && VT.isVector() && isTypeLegal(VT) &&
      DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1)))
    return DAG.UnrollVectorOp(N);
  return SDValue();
}

SDValue SystemZTargetLowering::combineINTRINSIC(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  unsigned Id = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  switch (Id) {
  // VECTOR LOAD (RIGHTMOST) WITH LENGTH with a length operand of 15
  // or larger is simply a vector load.
  case Intrinsic::s390_vll:
  case Intrinsic::s390_vlrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      if (C->getZExtValue() >= 15)
        return DAG.getLoad(N->getValueType(0), SDLoc(N), N->getOperand(0),
                           N->getOperand(3), MachinePointerInfo());
    break;
  // Likewise for VECTOR STORE (RIGHTMOST) WITH LENGTH.
  case Intrinsic::s390_vstl:
  case Intrinsic::s390_vstrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
      if (C->getZExtValue() >= 15)
        return DAG.getStore(N->getOperand(0), SDLoc(N), N->getOperand(2),
                            N->getOperand(4), MachinePointerInfo());
    break;
  }

  return SDValue();
}

SDValue SystemZTargetLowering::unwrapAddress(SDValue N) const {
  if (N->getOpcode() == SystemZISD::PCREL_WRAPPER)
    return N->getOperand(0);
  return N;
}

SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  switch(N->getOpcode()) {
  default: break;
  case ISD::ZERO_EXTEND: return combineZERO_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND_INREG: return combineSIGN_EXTEND_INREG(N, DCI);
  case SystemZISD::MERGE_HIGH:
  case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI);
  case ISD::LOAD: return combineLOAD(N, DCI);
  case ISD::STORE: return combineSTORE(N, DCI);
  case ISD::VECTOR_SHUFFLE: return combineVECTOR_SHUFFLE(N, DCI);
  case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
  case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
  case ISD::STRICT_FP_ROUND:
  case ISD::FP_ROUND: return combineFP_ROUND(N, DCI);
  case ISD::STRICT_FP_EXTEND:
  case ISD::FP_EXTEND: return combineFP_EXTEND(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP: return combineINT_TO_FP(N, DCI);
  case ISD::BSWAP: return combineBSWAP(N, DCI);
  case SystemZISD::BR_CCMASK: return combineBR_CCMASK(N, DCI);
  case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI);
  case SystemZISD::GET_CCMASK: return combineGET_CCMASK(N, DCI);
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM: return combineIntDIVREM(N, DCI);
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID: return combineINTRINSIC(N, DCI);
  }

  return SDValue();
}

// Return the demanded elements for the OpNo source operand of Op.
// DemandedElts are for Op.
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
                                    unsigned OpNo) {
  EVT VT = Op.getValueType();
  unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1);
  APInt SrcDemE;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
      // VECTOR PACK truncates the elements of two source vectors into one.
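      // For example, for a v8i16 result packed from two v4i32 sources,
      // result elements 0-3 come from operand 1 and elements 4-7 from
      // operand 2, so the demanded-elements mask is split accordingly.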
      SrcDemE = DemandedElts;
      if (OpNo == 2)
        SrcDemE.lshrInPlace(NumElts / 2);
      SrcDemE = SrcDemE.trunc(NumElts / 2);
      break;
    // VECTOR UNPACK extends half the elements of the source vector.
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, 0);
      break;
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, NumElts);
      break;
    case Intrinsic::s390_vpdi: {
      // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source.
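      // Result element 0 comes from operand 1 and element 1 from operand 2;
      // mask bit 4 selects which doubleword of operand 1 is used and mask
      // bit 1 does the same for operand 2, which is what MaskBit encodes.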
      SrcDemE = APInt(NumElts, 0);
      if (!DemandedElts[OpNo - 1])
        break;
      unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
      // Demand input element 0 or 1, given by the mask bit value.
      SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
      break;
    }
    case Intrinsic::s390_vsldb: {
      // VECTOR SHIFT LEFT DOUBLE BY BYTE
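      // For example, with FirstIdx == 3 the result consists of bytes 3-15
      // of operand 1 followed by bytes 0-2 of operand 2, so the first
      // NumSrc0Els == 13 result bytes are attributed to operand 1.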
      assert(VT == MVT::v16i8 && "Unexpected type.");
      unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
      unsigned NumSrc0Els = 16 - FirstIdx;
      SrcDemE = APInt(NumElts, 0);
      if (OpNo == 1) {
        APInt DemEls = DemandedElts.trunc(NumSrc0Els);
        SrcDemE.insertBits(DemEls, FirstIdx);
      } else {
        APInt DemEls = DemandedElts.lshr(NumSrc0Els);
        SrcDemE.insertBits(DemEls, 0);
      }
      break;
    }
    case Intrinsic::s390_vperm:
      SrcDemE = APInt(NumElts, 1);
      break;
    default:
      llvm_unreachable("Unhandled intrinsic.");
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
      // Scalar operand.
      SrcDemE = APInt(1, 1);
      break;
    case SystemZISD::SELECT_CCMASK:
      SrcDemE = DemandedElts;
      break;
    default:
      llvm_unreachable("Unhandled opcode.");
      break;
    }
  }
  return SrcDemE;
}

static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known,
                                  const APInt &DemandedElts,
                                  const SelectionDAG &DAG, unsigned Depth,
                                  unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  KnownBits LHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  KnownBits RHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
  Known = KnownBits::commonBits(LHSKnown, RHSKnown);
}

void
SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  Known.resetAll();

  // Intrinsic CC result is returned in the two low bits.
  unsigned tmp0, tmp1; // not used
  if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) {
    Known.Zero.setBitsFrom(2);
    return;
  }
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
    return;
  assert(Known.getBitWidth() == VT.getScalarSizeInBits() &&
         "KnownBits does not match VT in bitwidth");
  assert((!VT.isVector() ||
          (DemandedElts.getBitWidth() == VT.getVectorNumElements())) &&
         "DemandedElts does not match VT number of elements");
  unsigned BitWidth = Known.getBitWidth();
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    bool IsLogical = false;
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1);
      break;
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      IsLogical = true;
      LLVM_FALLTHROUGH;
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue SrcOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0);
      Known = DAG.computeKnownBits(SrcOp, SrcDemE, Depth + 1);
      if (IsLogical) {
        Known = Known.zext(BitWidth);
      } else
        Known = Known.sext(BitWidth);
      break;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
    case SystemZISD::SELECT_CCMASK:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0);
      break;
    case SystemZISD::REPLICATE: {
      SDValue SrcOp = Op.getOperand(0);
      Known = DAG.computeKnownBits(SrcOp, Depth + 1);
      if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
        Known = Known.sext(BitWidth); // VREPI sign extends the immediate.
      break;
    }
    default:
      break;
    }
  }

  // Known has the width of the source operand(s). Adjust if needed to match
  // the passed bitwidth.
  if (Known.getBitWidth() != BitWidth)
    Known = Known.anyextOrTrunc(BitWidth);
}

static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
                                        const SelectionDAG &DAG, unsigned Depth,
                                        unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE,
                                        Depth + 1);
  if (LHS == 1) return 1; // Early out.
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE,
                                        Depth + 1);
  if (RHS == 1) return 1; // Early out.
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getScalarSizeInBits();
  if (SrcBitWidth > VTBits) { // PACK
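    // For example, a PACK from v4i32 sources to a v8i16 result drops the
    // top 16 bits of each element, so SrcExtraBits == 16 of the known
    // sign-bit copies are consumed by the truncation.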
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
    return 1;
  }
  assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
  return Common;
}

unsigned
SystemZTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  if (Op.getResNo() != 0)
    return 1;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1);
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue PackedOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1);
      unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1);
      EVT VT = Op.getValueType();
      unsigned VTBits = VT.getScalarSizeInBits();
      Tmp += VTBits - PackedOp.getScalarValueSizeInBits();
      return Tmp;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::SELECT_CCMASK:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0);
    default:
      break;
    }
  }

  return 1;
}

unsigned
SystemZTargetLowering::getStackProbeSize(MachineFunction &MF) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  unsigned StackAlign = TFI->getStackAlignment();
  assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
         "Unexpected stack alignment");
  // The default stack probe size is 4096 if the function has no
  // stack-probe-size attribute.
  unsigned StackProbeSize = 4096;
  const Function &Fn = MF.getFunction();
  if (Fn.hasFnAttribute("stack-probe-size"))
    Fn.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  // Round down to the stack alignment.
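  // For example, with the usual 8-byte stack alignment, a requested probe
  // size of 4100 is rounded down to 4096.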
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
}

//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Force base value Base into a register before MI. Return the register.
static Register forceReg(MachineInstr &MI, MachineOperand &Base,
                         const SystemZInstrInfo *TII) {
  if (Base.isReg())
    return Base.getReg();

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
      .add(Base)
      .addImm(0)
      .addReg(0);
  return Reg;
}
7036
7037 // The CC operand of MI might be missing a kill marker because there
7038 // were multiple uses of CC, and ISel didn't know which to mark.
7039 // Figure out whether MI should have had a kill marker.
7040 static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {
7041 // Scan forward through BB for a use/def of CC.
7042 MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI)));
7043 for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) {
7044 const MachineInstr& mi = *miI;
7045 if (mi.readsRegister(SystemZ::CC))
7046 return false;
7047 if (mi.definesRegister(SystemZ::CC))
7048 break; // Should have kill-flag - update below.
7049 }
7050
7051 // If we hit the end of the block, check whether CC is live into a
7052 // successor.
7053 if (miI == MBB->end()) {
7054 for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI)
7055 if ((*SI)->isLiveIn(SystemZ::CC))
7056 return false;
7057 }
7058
7059 return true;
7060 }
7061
7062 // Return true if it is OK for this Select pseudo-opcode to be cascaded
7063 // together with other Select pseudo-opcodes into a single basic-block with
7064 // a conditional jump around it.
7065 static bool isSelectPseudo(MachineInstr &MI) {
7066 switch (MI.getOpcode()) {
7067 case SystemZ::Select32:
7068 case SystemZ::Select64:
7069 case SystemZ::SelectF32:
7070 case SystemZ::SelectF64:
7071 case SystemZ::SelectF128:
7072 case SystemZ::SelectVR32:
7073 case SystemZ::SelectVR64:
7074 case SystemZ::SelectVR128:
7075 return true;
7076
7077 default:
7078 return false;
7079 }
7080 }
7081
7082 // Helper function, which inserts PHI functions into SinkMBB:
7083 // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
7084 // where %FalseValue(i) and %TrueValue(i) are taken from Selects.
7085 static void createPHIsForSelects(SmallVector<MachineInstr*, 8> &Selects,
7086 MachineBasicBlock *TrueMBB,
7087 MachineBasicBlock *FalseMBB,
7088 MachineBasicBlock *SinkMBB) {
7089 MachineFunction *MF = TrueMBB->getParent();
7090 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
7091
7092 MachineInstr *FirstMI = Selects.front();
7093 unsigned CCValid = FirstMI->getOperand(3).getImm();
7094 unsigned CCMask = FirstMI->getOperand(4).getImm();
7095
7096 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
7097
7098 // As we are creating the PHIs, we have to be careful if there is more than
7099 // one. Later Selects may reference the results of earlier Selects, but later
7100 // PHIs have to reference the individual true/false inputs from earlier PHIs.
7101 // That also means that PHI construction must work forward from earlier to
7102 // later, and that the code must maintain a mapping from each earlier PHI's
7103 // destination register to the registers that went into that PHI.
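// For example, if a later Select uses an earlier Select's result %a as
// its true operand, the later PHI must instead use %a's true input on
// the TrueMBB edge (and %a's false input on the FalseMBB edge), since
// %a itself is only defined here in SinkMBB.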
7104 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
7105
7106 for (auto MI : Selects) {
7107 Register DestReg = MI->getOperand(0).getReg();
7108 Register TrueReg = MI->getOperand(1).getReg();
7109 Register FalseReg = MI->getOperand(2).getReg();
7110
7111 // If this Select we are generating is the opposite condition from
7112 // the jump we generated, then we have to swap the operands for the
7113 // PHI that is going to be generated.
7114 if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
7115 std::swap(TrueReg, FalseReg);
7116
7117 if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end())
7118 TrueReg = RegRewriteTable[TrueReg].first;
7119
7120 if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end())
7121 FalseReg = RegRewriteTable[FalseReg].second;
7122
7123 DebugLoc DL = MI->getDebugLoc();
7124 BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
7125 .addReg(TrueReg).addMBB(TrueMBB)
7126 .addReg(FalseReg).addMBB(FalseMBB);
7127
7128 // Add this PHI to the rewrite table.
7129 RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
7130 }
7131
7132 MF->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
7133 }
7134
7135 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
7136 MachineBasicBlock *
7137 SystemZTargetLowering::emitSelect(MachineInstr &MI,
7138 MachineBasicBlock *MBB) const {
7139 assert(isSelectPseudo(MI) && "Bad call to emitSelect()");
7140 const SystemZInstrInfo *TII =
7141 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
7142
7143 unsigned CCValid = MI.getOperand(3).getImm();
7144 unsigned CCMask = MI.getOperand(4).getImm();
7145
7146 // If we have a sequence of Select* pseudo instructions using the
7147 // same condition code value, we want to expand all of them into
7148 // a single pair of basic blocks using the same condition.
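  // A Select whose CCMask is the complement (CCValid ^ CCMask) can join
  // the group as well; createPHIsForSelects simply swaps its PHI operands.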
7149 SmallVector<MachineInstr*, 8> Selects;
7150 SmallVector<MachineInstr*, 8> DbgValues;
7151 Selects.push_back(&MI);
7152 unsigned Count = 0;
7153 for (MachineBasicBlock::iterator NextMIIt =
7154 std::next(MachineBasicBlock::iterator(MI));
7155 NextMIIt != MBB->end(); ++NextMIIt) {
7156 if (isSelectPseudo(*NextMIIt)) {
7157 assert(NextMIIt->getOperand(3).getImm() == CCValid &&
7158 "Bad CCValid operands since CC was not redefined.");
7159 if (NextMIIt->getOperand(4).getImm() == CCMask ||
7160 NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask)) {
7161 Selects.push_back(&*NextMIIt);
7162 continue;
7163 }
7164 break;
7165 }
7166 if (NextMIIt->definesRegister(SystemZ::CC) ||
7167 NextMIIt->usesCustomInsertionHook())
7168 break;
7169 bool User = false;
7170 for (auto SelMI : Selects)
7171 if (NextMIIt->readsVirtualRegister(SelMI->getOperand(0).getReg())) {
7172 User = true;
7173 break;
7174 }
7175 if (NextMIIt->isDebugInstr()) {
7176 if (User) {
7177 assert(NextMIIt->isDebugValue() && "Unhandled debug opcode.");
7178 DbgValues.push_back(&*NextMIIt);
7179 }
7180 }
7181 else if (User || ++Count > 20)
7182 break;
7183 }
7184
7185 MachineInstr *LastMI = Selects.back();
7186 bool CCKilled =
7187 (LastMI->killsRegister(SystemZ::CC) || checkCCKill(*LastMI, MBB));
7188 MachineBasicBlock *StartMBB = MBB;
7189 MachineBasicBlock *JoinMBB = SystemZ::splitBlockAfter(LastMI, MBB);
7190 MachineBasicBlock *FalseMBB = SystemZ::emitBlockAfter(StartMBB);
7191
7192 // Unless CC was killed in the last Select instruction, mark it as
7193 // live-in to both FalseMBB and JoinMBB.
7194 if (!CCKilled) {
7195 FalseMBB->addLiveIn(SystemZ::CC);
7196 JoinMBB->addLiveIn(SystemZ::CC);
7197 }
7198
7199 // StartMBB:
7200 // BRC CCMask, JoinMBB
7201 // # fallthrough to FalseMBB
7202 MBB = StartMBB;
7203 BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::BRC))
7204 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
7205 MBB->addSuccessor(JoinMBB);
7206 MBB->addSuccessor(FalseMBB);
7207
7208 // FalseMBB:
7209 // # fallthrough to JoinMBB
7210 MBB = FalseMBB;
7211 MBB->addSuccessor(JoinMBB);
7212
7213 // JoinMBB:
7214 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
7215 // ...
7216 MBB = JoinMBB;
7217 createPHIsForSelects(Selects, StartMBB, FalseMBB, MBB);
7218 for (auto SelMI : Selects)
7219 SelMI->eraseFromParent();
7220
7221 MachineBasicBlock::iterator InsertPos = MBB->getFirstNonPHI();
7222 for (auto DbgMI : DbgValues)
7223 MBB->splice(InsertPos, StartMBB, DbgMI);
7224
7225 return JoinMBB;
7226 }
7227
7228 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
7229 // StoreOpcode is the store to use and Invert says whether the store should
7230 // happen when the condition is false rather than true. If a STORE ON
7231 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
7232 MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
7233 MachineBasicBlock *MBB,
7234 unsigned StoreOpcode,
7235 unsigned STOCOpcode,
7236 bool Invert) const {
7237 const SystemZInstrInfo *TII =
7238 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
7239
7240 Register SrcReg = MI.getOperand(0).getReg();
7241 MachineOperand Base = MI.getOperand(1);
7242 int64_t Disp = MI.getOperand(2).getImm();
7243 Register IndexReg = MI.getOperand(3).getReg();
7244 unsigned CCValid = MI.getOperand(4).getImm();
7245 unsigned CCMask = MI.getOperand(5).getImm();
7246 DebugLoc DL = MI.getDebugLoc();
7247
7248 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
7249
7250 // ISel pattern matching also adds a load memory operand of the same
7251 // address, so take special care to find the storing memory operand.
7252 MachineMemOperand *MMO = nullptr;
7253 for (auto *I : MI.memoperands())
7254 if (I->isStore()) {
7255 MMO = I;
7256 break;
7257 }
7258
7259 // Use STOCOpcode if possible. We could use different store patterns in
7260 // order to avoid matching the index register, but the performance trade-offs
7261 // might be more complicated in that case.
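  // Note that XOR-ing CCMask with CCValid flips exactly the CC values the
  // comparison can produce, i.e. it inverts the condition.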
7262 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
7263 if (Invert)
7264 CCMask ^= CCValid;
7265
7266 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
7267 .addReg(SrcReg)
7268 .add(Base)
7269 .addImm(Disp)
7270 .addImm(CCValid)
7271 .addImm(CCMask)
7272 .addMemOperand(MMO);
7273
7274 MI.eraseFromParent();
7275 return MBB;
7276 }
7277
7278 // Get the condition needed to branch around the store.
7279 if (!Invert)
7280 CCMask ^= CCValid;
7281
7282 MachineBasicBlock *StartMBB = MBB;
7283 MachineBasicBlock *JoinMBB = SystemZ::splitBlockBefore(MI, MBB);
7284 MachineBasicBlock *FalseMBB = SystemZ::emitBlockAfter(StartMBB);
7285
7286 // Unless CC was killed in the CondStore instruction, mark it as
7287 // live-in to both FalseMBB and JoinMBB.
7288 if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) {
7289 FalseMBB->addLiveIn(SystemZ::CC);
7290 JoinMBB->addLiveIn(SystemZ::CC);
7291 }
7292
7293 // StartMBB:
7294 // BRC CCMask, JoinMBB
7295 // # fallthrough to FalseMBB
7296 MBB = StartMBB;
7297 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7298 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
7299 MBB->addSuccessor(JoinMBB);
7300 MBB->addSuccessor(FalseMBB);
7301
7302 // FalseMBB:
7303 // store %SrcReg, %Disp(%Index,%Base)
7304 // # fallthrough to JoinMBB
7305 MBB = FalseMBB;
7306 BuildMI(MBB, DL, TII->get(StoreOpcode))
7307 .addReg(SrcReg)
7308 .add(Base)
7309 .addImm(Disp)
7310 .addReg(IndexReg)
7311 .addMemOperand(MMO);
7312 MBB->addSuccessor(JoinMBB);
7313
7314 MI.eraseFromParent();
7315 return JoinMBB;
7316 }
7317
7318 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
7319 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that
7320 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
7321 // BitSize is the width of the field in bits, or 0 if this is a partword
7322 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
7323 // is one of the operands. Invert says whether the field should be
7324 // inverted after performing BinOpcode (e.g. for NAND).
7325 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
7326 MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
7327 unsigned BitSize, bool Invert) const {
7328 MachineFunction &MF = *MBB->getParent();
7329 const SystemZInstrInfo *TII =
7330 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
7331 MachineRegisterInfo &MRI = MF.getRegInfo();
7332 bool IsSubWord = (BitSize < 32);
7333
7334 // Extract the operands. Base can be a register or a frame index.
7335 // Src2 can be a register or immediate.
7336 Register Dest = MI.getOperand(0).getReg();
7337 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
7338 int64_t Disp = MI.getOperand(2).getImm();
7339 MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
7340 Register BitShift = IsSubWord ? MI.getOperand(4).getReg() : Register();
7341 Register NegBitShift = IsSubWord ? MI.getOperand(5).getReg() : Register();
7342 DebugLoc DL = MI.getDebugLoc();
7343 if (IsSubWord)
7344 BitSize = MI.getOperand(6).getImm();
7345
7346 // Subword operations use 32-bit registers.
7347 const TargetRegisterClass *RC = (BitSize <= 32 ?
7348 &SystemZ::GR32BitRegClass :
7349 &SystemZ::GR64BitRegClass);
7350 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
7351 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
7352
7353 // Get the right opcodes for the displacement.
7354 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
7355 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
7356 assert(LOpcode && CSOpcode && "Displacement out of range");
7357
7358 // Create virtual registers for temporary results.
7359 Register OrigVal = MRI.createVirtualRegister(RC);
7360 Register OldVal = MRI.createVirtualRegister(RC);
7361 Register NewVal = (BinOpcode || IsSubWord ?
7362 MRI.createVirtualRegister(RC) : Src2.getReg());
7363 Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
7364 Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
7365
7366 // Insert a basic block for the main loop.
7367 MachineBasicBlock *StartMBB = MBB;
7368 MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB);
7369 MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB);
7370
7371 // StartMBB:
7372 // ...
7373 // %OrigVal = L Disp(%Base)
7374   // # fall through to LoopMBB
7375 MBB = StartMBB;
7376 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
7377 MBB->addSuccessor(LoopMBB);
7378
7379 // LoopMBB:
7380 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
7381 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
7382 // %RotatedNewVal = OP %RotatedOldVal, %Src2
7383 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
7384 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
7385 // JNE LoopMBB
7386   // # fall through to DoneMBB
7387 MBB = LoopMBB;
7388 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
7389 .addReg(OrigVal).addMBB(StartMBB)
7390 .addReg(Dest).addMBB(LoopMBB);
7391 if (IsSubWord)
7392 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
7393 .addReg(OldVal).addReg(BitShift).addImm(0);
7394 if (Invert) {
7395 // Perform the operation normally and then invert every bit of the field.
7396 Register Tmp = MRI.createVirtualRegister(RC);
7397 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
7398 if (BitSize <= 32)
7399 // XILF with the upper BitSize bits set.
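      // (For an 8-bit field the immediate is 0xFF000000, matching the
      // field's position in the top BitSize bits of the rotated value.)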
7400 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
7401 .addReg(Tmp).addImm(-1U << (32 - BitSize));
7402 else {
7403 // Use LCGR and add -1 to the result, which is more compact than
7404 // an XILF, XILH pair.
7405 Register Tmp2 = MRI.createVirtualRegister(RC);
7406 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
7407 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
7408 .addReg(Tmp2).addImm(-1);
7409 }
7410 } else if (BinOpcode)
7411     // A simple binary operation.
7412 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
7413 .addReg(RotatedOldVal)
7414 .add(Src2);
7415 else if (IsSubWord)
7416 // Use RISBG to rotate Src2 into position and use it to replace the
7417 // field in RotatedOldVal.
7418 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
7419 .addReg(RotatedOldVal).addReg(Src2.getReg())
7420 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
7421 if (IsSubWord)
7422 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
7423 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
7424 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
7425 .addReg(OldVal)
7426 .addReg(NewVal)
7427 .add(Base)
7428 .addImm(Disp);
7429 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7430 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
7431 MBB->addSuccessor(LoopMBB);
7432 MBB->addSuccessor(DoneMBB);
7433
7434 MI.eraseFromParent();
7435 return DoneMBB;
7436 }
7437
7438 // Implement EmitInstrWithCustomInserter for pseudo
7439 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the
7440 // instruction that should be used to compare the current field with the
7441 // minimum or maximum value. KeepOldMask is the BRC condition-code mask
7442 // for when the current field should be kept. BitSize is the width of
7443 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
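// For example, ATOMIC_LOAD_MIN_32 passes CompareOpcode CR and
// KeepOldMask CCMASK_CMP_LE: the loaded value is kept whenever it is
// already less than or equal to Src2.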
7444 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
7445 MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
7446 unsigned KeepOldMask, unsigned BitSize) const {
7447 MachineFunction &MF = *MBB->getParent();
7448 const SystemZInstrInfo *TII =
7449 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
7450 MachineRegisterInfo &MRI = MF.getRegInfo();
7451 bool IsSubWord = (BitSize < 32);
7452
7453 // Extract the operands. Base can be a register or a frame index.
7454 Register Dest = MI.getOperand(0).getReg();
7455 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
7456 int64_t Disp = MI.getOperand(2).getImm();
7457 Register Src2 = MI.getOperand(3).getReg();
7458 Register BitShift = (IsSubWord ? MI.getOperand(4).getReg() : Register());
7459 Register NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : Register());
7460 DebugLoc DL = MI.getDebugLoc();
7461 if (IsSubWord)
7462 BitSize = MI.getOperand(6).getImm();
7463
7464 // Subword operations use 32-bit registers.
7465 const TargetRegisterClass *RC = (BitSize <= 32 ?
7466 &SystemZ::GR32BitRegClass :
7467 &SystemZ::GR64BitRegClass);
7468 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
7469 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
7470
7471 // Get the right opcodes for the displacement.
7472 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
7473 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
7474 assert(LOpcode && CSOpcode && "Displacement out of range");
7475
7476 // Create virtual registers for temporary results.
7477 Register OrigVal = MRI.createVirtualRegister(RC);
7478 Register OldVal = MRI.createVirtualRegister(RC);
7479 Register NewVal = MRI.createVirtualRegister(RC);
7480 Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
7481 Register RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
7482 Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
7483
7484 // Insert 3 basic blocks for the loop.
7485 MachineBasicBlock *StartMBB = MBB;
7486 MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB);
7487 MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB);
7488 MachineBasicBlock *UseAltMBB = SystemZ::emitBlockAfter(LoopMBB);
7489 MachineBasicBlock *UpdateMBB = SystemZ::emitBlockAfter(UseAltMBB);
7490
7491 // StartMBB:
7492 // ...
7493 // %OrigVal = L Disp(%Base)
7494   // # fall through to LoopMBB
7495 MBB = StartMBB;
7496 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
7497 MBB->addSuccessor(LoopMBB);
7498
7499 // LoopMBB:
7500 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
7501 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
7502 // CompareOpcode %RotatedOldVal, %Src2
7503 // BRC KeepOldMask, UpdateMBB
7504 MBB = LoopMBB;
7505 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
7506 .addReg(OrigVal).addMBB(StartMBB)
7507 .addReg(Dest).addMBB(UpdateMBB);
7508 if (IsSubWord)
7509 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
7510 .addReg(OldVal).addReg(BitShift).addImm(0);
7511 BuildMI(MBB, DL, TII->get(CompareOpcode))
7512 .addReg(RotatedOldVal).addReg(Src2);
7513 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7514 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
7515 MBB->addSuccessor(UpdateMBB);
7516 MBB->addSuccessor(UseAltMBB);
7517
7518 // UseAltMBB:
7519 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
7520   // # fall through to UpdateMBB
7521 MBB = UseAltMBB;
7522 if (IsSubWord)
7523 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
7524 .addReg(RotatedOldVal).addReg(Src2)
7525 .addImm(32).addImm(31 + BitSize).addImm(0);
7526 MBB->addSuccessor(UpdateMBB);
7527
7528 // UpdateMBB:
7529 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
7530 // [ %RotatedAltVal, UseAltMBB ]
7531 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
7532 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
7533 // JNE LoopMBB
7534   // # fall through to DoneMBB
7535 MBB = UpdateMBB;
7536 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
7537 .addReg(RotatedOldVal).addMBB(LoopMBB)
7538 .addReg(RotatedAltVal).addMBB(UseAltMBB);
7539 if (IsSubWord)
7540 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
7541 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
7542 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
7543 .addReg(OldVal)
7544 .addReg(NewVal)
7545 .add(Base)
7546 .addImm(Disp);
7547 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7548 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
7549 MBB->addSuccessor(LoopMBB);
7550 MBB->addSuccessor(DoneMBB);
7551
7552 MI.eraseFromParent();
7553 return DoneMBB;
7554 }
7555
7556 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
7557 // instruction MI.
7558 MachineBasicBlock *
7559 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
7560 MachineBasicBlock *MBB) const {
7561
7562 MachineFunction &MF = *MBB->getParent();
7563 const SystemZInstrInfo *TII =
7564 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
7565 MachineRegisterInfo &MRI = MF.getRegInfo();
7566
7567 // Extract the operands. Base can be a register or a frame index.
7568 Register Dest = MI.getOperand(0).getReg();
7569 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
7570 int64_t Disp = MI.getOperand(2).getImm();
7571 Register OrigCmpVal = MI.getOperand(3).getReg();
7572 Register OrigSwapVal = MI.getOperand(4).getReg();
7573 Register BitShift = MI.getOperand(5).getReg();
7574 Register NegBitShift = MI.getOperand(6).getReg();
7575 int64_t BitSize = MI.getOperand(7).getImm();
7576 DebugLoc DL = MI.getDebugLoc();
7577
7578 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
7579
7580 // Get the right opcodes for the displacement.
7581 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
7582 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
7583 assert(LOpcode && CSOpcode && "Displacement out of range");
7584
7585 // Create virtual registers for temporary results.
7586 Register OrigOldVal = MRI.createVirtualRegister(RC);
7587 Register OldVal = MRI.createVirtualRegister(RC);
7588 Register CmpVal = MRI.createVirtualRegister(RC);
7589 Register SwapVal = MRI.createVirtualRegister(RC);
7590 Register StoreVal = MRI.createVirtualRegister(RC);
7591 Register RetryOldVal = MRI.createVirtualRegister(RC);
7592 Register RetryCmpVal = MRI.createVirtualRegister(RC);
7593 Register RetrySwapVal = MRI.createVirtualRegister(RC);
7594
7595 // Insert 2 basic blocks for the loop.
7596 MachineBasicBlock *StartMBB = MBB;
7597 MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB);
7598 MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB);
7599 MachineBasicBlock *SetMBB = SystemZ::emitBlockAfter(LoopMBB);
7600
7601 // StartMBB:
7602 // ...
7603 // %OrigOldVal = L Disp(%Base)
7604   // # fall through to LoopMBB
7605 MBB = StartMBB;
7606 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
7607 .add(Base)
7608 .addImm(Disp)
7609 .addReg(0);
7610 MBB->addSuccessor(LoopMBB);
7611
7612 // LoopMBB:
7613   // %OldVal        = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
7614   // %CmpVal        = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
7615   // %SwapVal       = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
7616 // %Dest = RLL %OldVal, BitSize(%BitShift)
7617 // ^^ The low BitSize bits contain the field
7618 // of interest.
7619 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
7620 // ^^ Replace the upper 32-BitSize bits of the
7621 // comparison value with those that we loaded,
7622 // so that we can use a full word comparison.
7623 // CR %Dest, %RetryCmpVal
7624 // JNE DoneMBB
7625 // # Fall through to SetMBB
7626 MBB = LoopMBB;
7627 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
7628 .addReg(OrigOldVal).addMBB(StartMBB)
7629 .addReg(RetryOldVal).addMBB(SetMBB);
7630 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
7631 .addReg(OrigCmpVal).addMBB(StartMBB)
7632 .addReg(RetryCmpVal).addMBB(SetMBB);
7633 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
7634 .addReg(OrigSwapVal).addMBB(StartMBB)
7635 .addReg(RetrySwapVal).addMBB(SetMBB);
7636 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
7637 .addReg(OldVal).addReg(BitShift).addImm(BitSize);
7638 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
7639 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
7640 BuildMI(MBB, DL, TII->get(SystemZ::CR))
7641 .addReg(Dest).addReg(RetryCmpVal);
7642 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7643 .addImm(SystemZ::CCMASK_ICMP)
7644 .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
7645 MBB->addSuccessor(DoneMBB);
7646 MBB->addSuccessor(SetMBB);
7647
7648 // SetMBB:
7649 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
7650 // ^^ Replace the upper 32-BitSize bits of the new
7651 // value with those that we loaded.
7652 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
7653 // ^^ Rotate the new field to its proper position.
7654 // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
7655 // JNE LoopMBB
7656   // # fall through to DoneMBB
7657 MBB = SetMBB;
7658 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
7659 .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
7660 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
7661 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
7662 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
7663 .addReg(OldVal)
7664 .addReg(StoreVal)
7665 .add(Base)
7666 .addImm(Disp);
7667 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7668 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
7669 MBB->addSuccessor(LoopMBB);
7670 MBB->addSuccessor(DoneMBB);
7671
7672 // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
7673 // to the block after the loop. At this point, CC may have been defined
7674 // either by the CR in LoopMBB or by the CS in SetMBB.
7675 if (!MI.registerDefIsDead(SystemZ::CC))
7676 DoneMBB->addLiveIn(SystemZ::CC);
7677
7678 MI.eraseFromParent();
7679 return DoneMBB;
7680 }
7681
7682 // Emit a move from two GR64s to a GR128.
7683 MachineBasicBlock *
7684 SystemZTargetLowering::emitPair128(MachineInstr &MI,
7685 MachineBasicBlock *MBB) const {
7686 MachineFunction &MF = *MBB->getParent();
7687 const SystemZInstrInfo *TII =
7688 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
7689 MachineRegisterInfo &MRI = MF.getRegInfo();
7690 DebugLoc DL = MI.getDebugLoc();
7691
7692 Register Dest = MI.getOperand(0).getReg();
7693 Register Hi = MI.getOperand(1).getReg();
7694 Register Lo = MI.getOperand(2).getReg();
7695 Register Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
7696 Register Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
7697
7698 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);
7699 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2)
7700 .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64);
7701 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
7702 .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64);
7703
7704 MI.eraseFromParent();
7705 return MBB;
7706 }
7707
7708 // Emit an extension from a GR64 to a GR128. ClearEven is true
7709 // if the high register of the GR128 value must be cleared or false if
7710 // it's "don't care".
7711 MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
7712 MachineBasicBlock *MBB,
7713 bool ClearEven) const {
7714 MachineFunction &MF = *MBB->getParent();
7715 const SystemZInstrInfo *TII =
7716 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
7717 MachineRegisterInfo &MRI = MF.getRegInfo();
7718 DebugLoc DL = MI.getDebugLoc();
7719
7720 Register Dest = MI.getOperand(0).getReg();
7721 Register Src = MI.getOperand(1).getReg();
7722 Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
7723
7724 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
7725 if (ClearEven) {
7726 Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
7727 Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
7728
7729 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
7730 .addImm(0);
7731 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
7732 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
7733 In128 = NewIn128;
7734 }
7735 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
7736 .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);
7737
7738 MI.eraseFromParent();
7739 return MBB;
7740 }
7741
7742 MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
7743 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
7744 MachineFunction &MF = *MBB->getParent();
7745 const SystemZInstrInfo *TII =
7746 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
7747 MachineRegisterInfo &MRI = MF.getRegInfo();
7748 DebugLoc DL = MI.getDebugLoc();
7749
7750 MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
7751 uint64_t DestDisp = MI.getOperand(1).getImm();
7752 MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
7753 uint64_t SrcDisp = MI.getOperand(3).getImm();
7754 uint64_t Length = MI.getOperand(4).getImm();
7755
7756 // When generating more than one CLC, all but the last will need to
7757 // branch to the end when a difference is found.
7758 MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
7759 SystemZ::splitBlockAfter(MI, MBB) : nullptr);
7760
7761 // Check for the loop form, in which operand 5 is the trip count.
7762 if (MI.getNumExplicitOperands() > 5) {
7763 bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
7764
7765 Register StartCountReg = MI.getOperand(5).getReg();
7766 Register StartSrcReg = forceReg(MI, SrcBase, TII);
7767 Register StartDestReg = (HaveSingleBase ? StartSrcReg :
7768 forceReg(MI, DestBase, TII));
7769
7770 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
7771 Register ThisSrcReg = MRI.createVirtualRegister(RC);
7772 Register ThisDestReg = (HaveSingleBase ? ThisSrcReg :
7773 MRI.createVirtualRegister(RC));
7774 Register NextSrcReg = MRI.createVirtualRegister(RC);
7775 Register NextDestReg = (HaveSingleBase ? NextSrcReg :
7776 MRI.createVirtualRegister(RC));
7777
7778 RC = &SystemZ::GR64BitRegClass;
7779 Register ThisCountReg = MRI.createVirtualRegister(RC);
7780 Register NextCountReg = MRI.createVirtualRegister(RC);
7781
7782 MachineBasicBlock *StartMBB = MBB;
7783 MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB);
7784 MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB);
7785 MachineBasicBlock *NextMBB =
7786 (EndMBB ? SystemZ::emitBlockAfter(LoopMBB) : LoopMBB);
7787
7788 // StartMBB:
7789     // # fall through to LoopMBB
7790 MBB->addSuccessor(LoopMBB);
7791
7792 // LoopMBB:
7793 // %ThisDestReg = phi [ %StartDestReg, StartMBB ],
7794 // [ %NextDestReg, NextMBB ]
7795 // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
7796 // [ %NextSrcReg, NextMBB ]
7797 // %ThisCountReg = phi [ %StartCountReg, StartMBB ],
7798 // [ %NextCountReg, NextMBB ]
7799 // ( PFD 2, 768+DestDisp(%ThisDestReg) )
7800 // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
7801 // ( JLH EndMBB )
7802 //
7803 // The prefetch is used only for MVC. The JLH is used only for CLC.
7804 MBB = LoopMBB;
7805
7806 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
7807 .addReg(StartDestReg).addMBB(StartMBB)
7808 .addReg(NextDestReg).addMBB(NextMBB);
7809 if (!HaveSingleBase)
7810 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
7811 .addReg(StartSrcReg).addMBB(StartMBB)
7812 .addReg(NextSrcReg).addMBB(NextMBB);
7813 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
7814 .addReg(StartCountReg).addMBB(StartMBB)
7815 .addReg(NextCountReg).addMBB(NextMBB);
7816 if (Opcode == SystemZ::MVC)
7817 BuildMI(MBB, DL, TII->get(SystemZ::PFD))
7818 .addImm(SystemZ::PFD_WRITE)
7819 .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
7820 BuildMI(MBB, DL, TII->get(Opcode))
7821 .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
7822 .addReg(ThisSrcReg).addImm(SrcDisp);
7823 if (EndMBB) {
7824 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7825 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
7826 .addMBB(EndMBB);
7827 MBB->addSuccessor(EndMBB);
7828 MBB->addSuccessor(NextMBB);
7829 }
7830
7831 // NextMBB:
7832 // %NextDestReg = LA 256(%ThisDestReg)
7833 // %NextSrcReg = LA 256(%ThisSrcReg)
7834 // %NextCountReg = AGHI %ThisCountReg, -1
7835 // CGHI %NextCountReg, 0
7836 // JLH LoopMBB
7837     // # fall through to DoneMBB
7838 //
7839 // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
7840 MBB = NextMBB;
7841
7842 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
7843 .addReg(ThisDestReg).addImm(256).addReg(0);
7844 if (!HaveSingleBase)
7845 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
7846 .addReg(ThisSrcReg).addImm(256).addReg(0);
7847 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
7848 .addReg(ThisCountReg).addImm(-1);
7849 BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
7850 .addReg(NextCountReg).addImm(0);
7851 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7852 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
7853 .addMBB(LoopMBB);
7854 MBB->addSuccessor(LoopMBB);
7855 MBB->addSuccessor(DoneMBB);
7856
7857 DestBase = MachineOperand::CreateReg(NextDestReg, false);
7858 SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
7859 Length &= 255;
7860 if (EndMBB && !Length)
7861 // If the loop handled the whole CLC range, DoneMBB will be empty with
7862 // CC live-through into EndMBB, so add it as live-in.
7863 DoneMBB->addLiveIn(SystemZ::CC);
7864 MBB = DoneMBB;
7865 }
7866 // Handle any remaining bytes with straight-line code.
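  // For example, a 700-byte MVCLoop covers 512 bytes in the loop above
  // (assuming instruction selection supplied a trip count of 700 / 256 = 2)
  // and leaves 700 & 255 = 188 bytes for a single MVC here.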
7867 while (Length > 0) {
7868 uint64_t ThisLength = std::min(Length, uint64_t(256));
7869     // The previous iteration might have created out-of-range displacements.
7870     // If so, materialize the base address with LAY first.
7871 if (!isUInt<12>(DestDisp)) {
7872 Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
7873 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
7874 .add(DestBase)
7875 .addImm(DestDisp)
7876 .addReg(0);
7877 DestBase = MachineOperand::CreateReg(Reg, false);
7878 DestDisp = 0;
7879 }
7880 if (!isUInt<12>(SrcDisp)) {
7881 Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
7882 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
7883 .add(SrcBase)
7884 .addImm(SrcDisp)
7885 .addReg(0);
7886 SrcBase = MachineOperand::CreateReg(Reg, false);
7887 SrcDisp = 0;
7888 }
7889 BuildMI(*MBB, MI, DL, TII->get(Opcode))
7890 .add(DestBase)
7891 .addImm(DestDisp)
7892 .addImm(ThisLength)
7893 .add(SrcBase)
7894 .addImm(SrcDisp)
7895 .setMemRefs(MI.memoperands());
7896 DestDisp += ThisLength;
7897 SrcDisp += ThisLength;
7898 Length -= ThisLength;
7899 // If there's another CLC to go, branch to the end if a difference
7900 // was found.
7901 if (EndMBB && Length > 0) {
7902 MachineBasicBlock *NextMBB = SystemZ::splitBlockBefore(MI, MBB);
7903 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7904 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
7905 .addMBB(EndMBB);
7906 MBB->addSuccessor(EndMBB);
7907 MBB->addSuccessor(NextMBB);
7908 MBB = NextMBB;
7909 }
7910 }
7911 if (EndMBB) {
7912 MBB->addSuccessor(EndMBB);
7913 MBB = EndMBB;
7914 MBB->addLiveIn(SystemZ::CC);
7915 }
7916
7917 MI.eraseFromParent();
7918 return MBB;
7919 }
7920
7921 // Decompose string pseudo-instruction MI into a loop that continually performs
7922 // Opcode until CC != 3.
7923 MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
7924 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
7925 MachineFunction &MF = *MBB->getParent();
7926 const SystemZInstrInfo *TII =
7927 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
7928 MachineRegisterInfo &MRI = MF.getRegInfo();
7929 DebugLoc DL = MI.getDebugLoc();
7930
7931   Register End1Reg = MI.getOperand(0).getReg();
7932   Register Start1Reg = MI.getOperand(1).getReg();
7933   Register Start2Reg = MI.getOperand(2).getReg();
7934   Register CharReg = MI.getOperand(3).getReg();
7935
7936 const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
7937   Register This1Reg = MRI.createVirtualRegister(RC);
7938   Register This2Reg = MRI.createVirtualRegister(RC);
7939   Register End2Reg = MRI.createVirtualRegister(RC);
7940
7941 MachineBasicBlock *StartMBB = MBB;
7942 MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB);
7943 MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB);
7944
7945 // StartMBB:
7946   // # fall through to LoopMBB
7947 MBB->addSuccessor(LoopMBB);
7948
7949 // LoopMBB:
7950 // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
7951 // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
7952 // R0L = %CharReg
7953 // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
7954 // JO LoopMBB
7955   // # fall through to DoneMBB
7956 //
7957 // The load of R0L can be hoisted by post-RA LICM.
7958 MBB = LoopMBB;
7959
7960 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
7961 .addReg(Start1Reg).addMBB(StartMBB)
7962 .addReg(End1Reg).addMBB(LoopMBB);
7963 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
7964 .addReg(Start2Reg).addMBB(StartMBB)
7965 .addReg(End2Reg).addMBB(LoopMBB);
7966 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
7967 BuildMI(MBB, DL, TII->get(Opcode))
7968 .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
7969 .addReg(This1Reg).addReg(This2Reg);
7970 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7971 .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
7972 MBB->addSuccessor(LoopMBB);
7973 MBB->addSuccessor(DoneMBB);
7974
7975 DoneMBB->addLiveIn(SystemZ::CC);
7976
7977 MI.eraseFromParent();
7978 return DoneMBB;
7979 }
7980
7981 // Update TBEGIN instruction with final opcode and register clobbers.
7982 MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
7983 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
7984 bool NoFloat) const {
7985 MachineFunction &MF = *MBB->getParent();
7986 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
7987 const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
7988
7989 // Update opcode.
7990 MI.setDesc(TII->get(Opcode));
7991
7992 // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
7993 // Make sure to add the corresponding GRSM bits if they are missing.
7994 uint64_t Control = MI.getOperand(2).getImm();
7995 static const unsigned GPRControlBit[16] = {
7996 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
7997 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
7998 };
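  // Each even/odd register pair shares one bit of TBEGIN's general
  // register save mask (GRSM), hence the pairwise-duplicated entries.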
7999 Control |= GPRControlBit[15];
8000 if (TFI->hasFP(MF))
8001 Control |= GPRControlBit[11];
8002 MI.getOperand(2).setImm(Control);
8003
8004 // Add GPR clobbers.
8005 for (int I = 0; I < 16; I++) {
8006 if ((Control & GPRControlBit[I]) == 0) {
8007 unsigned Reg = SystemZMC::GR64Regs[I];
8008 MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
8009 }
8010 }
8011
8012 // Add FPR/VR clobbers.
8013 if (!NoFloat && (Control & 4) != 0) {
8014 if (Subtarget.hasVector()) {
8015 for (int I = 0; I < 32; I++) {
8016 unsigned Reg = SystemZMC::VR128Regs[I];
8017 MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
8018 }
8019 } else {
8020 for (int I = 0; I < 16; I++) {
8021 unsigned Reg = SystemZMC::FP64Regs[I];
8022 MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
8023 }
8024 }
8025 }
8026
8027 return MBB;
8028 }
8029
8030 MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
8031 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
8032 MachineFunction &MF = *MBB->getParent();
8033 MachineRegisterInfo *MRI = &MF.getRegInfo();
8034 const SystemZInstrInfo *TII =
8035 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
8036 DebugLoc DL = MI.getDebugLoc();
8037
8038 Register SrcReg = MI.getOperand(0).getReg();
8039
8040 // Create new virtual register of the same class as source.
8041 const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
8042 Register DstReg = MRI->createVirtualRegister(RC);
8043
8044 // Replace pseudo with a normal load-and-test that models the def as
8045 // well.
8046 BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
8047 .addReg(SrcReg)
8048 .setMIFlags(MI.getFlags());
8049 MI.eraseFromParent();
8050
8051 return MBB;
8052 }
8053
8054 MachineBasicBlock *SystemZTargetLowering::emitProbedAlloca(
8055 MachineInstr &MI, MachineBasicBlock *MBB) const {
8056 MachineFunction &MF = *MBB->getParent();
8057 MachineRegisterInfo *MRI = &MF.getRegInfo();
8058 const SystemZInstrInfo *TII =
8059 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
8060 DebugLoc DL = MI.getDebugLoc();
8061 const unsigned ProbeSize = getStackProbeSize(MF);
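  // For example, a 10000-byte allocation with the default 4096-byte probe
  // size takes two passes through the loop below (allocating and probing
  // 8192 bytes) and then probes the remaining 1808-byte tail.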
8062 Register DstReg = MI.getOperand(0).getReg();
8063 Register SizeReg = MI.getOperand(2).getReg();
8064
8065 MachineBasicBlock *StartMBB = MBB;
8066 MachineBasicBlock *DoneMBB = SystemZ::splitBlockAfter(MI, MBB);
8067 MachineBasicBlock *LoopTestMBB = SystemZ::emitBlockAfter(StartMBB);
8068 MachineBasicBlock *LoopBodyMBB = SystemZ::emitBlockAfter(LoopTestMBB);
8069 MachineBasicBlock *TailTestMBB = SystemZ::emitBlockAfter(LoopBodyMBB);
8070 MachineBasicBlock *TailMBB = SystemZ::emitBlockAfter(TailTestMBB);
8071
8072 MachineMemOperand *VolLdMMO = MF.getMachineMemOperand(MachinePointerInfo(),
8073 MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad, 8, Align(1));
8074
8075 Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8076 Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8077
8078 // LoopTestMBB
8079 // BRC TailTestMBB
8080 // # fallthrough to LoopBodyMBB
8081 StartMBB->addSuccessor(LoopTestMBB);
8082 MBB = LoopTestMBB;
8083 BuildMI(MBB, DL, TII->get(SystemZ::PHI), PHIReg)
8084 .addReg(SizeReg)
8085 .addMBB(StartMBB)
8086 .addReg(IncReg)
8087 .addMBB(LoopBodyMBB);
8088 BuildMI(MBB, DL, TII->get(SystemZ::CLGFI))
8089 .addReg(PHIReg)
8090 .addImm(ProbeSize);
8091 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
8092 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_LT)
8093 .addMBB(TailTestMBB);
8094 MBB->addSuccessor(LoopBodyMBB);
8095 MBB->addSuccessor(TailTestMBB);
8096
8097 // LoopBodyMBB: Allocate and probe by means of a volatile compare.
8098 // J LoopTestMBB
8099 MBB = LoopBodyMBB;
8100 BuildMI(MBB, DL, TII->get(SystemZ::SLGFI), IncReg)
8101 .addReg(PHIReg)
8102 .addImm(ProbeSize);
8103 BuildMI(MBB, DL, TII->get(SystemZ::SLGFI), SystemZ::R15D)
8104 .addReg(SystemZ::R15D)
8105 .addImm(ProbeSize);
8106 BuildMI(MBB, DL, TII->get(SystemZ::CG)).addReg(SystemZ::R15D)
8107 .addReg(SystemZ::R15D).addImm(ProbeSize - 8).addReg(0)
8108 .setMemRefs(VolLdMMO);
8109 BuildMI(MBB, DL, TII->get(SystemZ::J)).addMBB(LoopTestMBB);
8110 MBB->addSuccessor(LoopTestMBB);
8111
8112 // TailTestMBB
8113 // BRC DoneMBB
8114 // # fallthrough to TailMBB
8115 MBB = TailTestMBB;
8116 BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
8117 .addReg(PHIReg)
8118 .addImm(0);
8119 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
8120 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ)
8121 .addMBB(DoneMBB);
8122 MBB->addSuccessor(TailMBB);
8123 MBB->addSuccessor(DoneMBB);
8124
8125 // TailMBB
8126 // # fallthrough to DoneMBB
8127 MBB = TailMBB;
8128 BuildMI(MBB, DL, TII->get(SystemZ::SLGR), SystemZ::R15D)
8129 .addReg(SystemZ::R15D)
8130 .addReg(PHIReg);
8131 BuildMI(MBB, DL, TII->get(SystemZ::CG)).addReg(SystemZ::R15D)
8132 .addReg(SystemZ::R15D).addImm(-8).addReg(PHIReg)
8133 .setMemRefs(VolLdMMO);
8134 MBB->addSuccessor(DoneMBB);
8135
8136 // DoneMBB
8137 MBB = DoneMBB;
8138 BuildMI(*MBB, MBB->begin(), DL, TII->get(TargetOpcode::COPY), DstReg)
8139 .addReg(SystemZ::R15D);
8140
8141 MI.eraseFromParent();
8142 return DoneMBB;
8143 }
8144
8145 MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
8146 MachineInstr &MI, MachineBasicBlock *MBB) const {
8147 switch (MI.getOpcode()) {
8148 case SystemZ::Select32:
8149 case SystemZ::Select64:
8150 case SystemZ::SelectF32:
8151 case SystemZ::SelectF64:
8152 case SystemZ::SelectF128:
8153 case SystemZ::SelectVR32:
8154 case SystemZ::SelectVR64:
8155 case SystemZ::SelectVR128:
8156 return emitSelect(MI, MBB);
8157
8158 case SystemZ::CondStore8Mux:
8159 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
8160 case SystemZ::CondStore8MuxInv:
8161 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
8162 case SystemZ::CondStore16Mux:
8163 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
8164 case SystemZ::CondStore16MuxInv:
8165 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
8166 case SystemZ::CondStore32Mux:
8167 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
8168 case SystemZ::CondStore32MuxInv:
8169 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
8170 case SystemZ::CondStore8:
8171 return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
8172 case SystemZ::CondStore8Inv:
8173 return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
8174 case SystemZ::CondStore16:
8175 return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
8176 case SystemZ::CondStore16Inv:
8177 return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
8178 case SystemZ::CondStore32:
8179 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
8180 case SystemZ::CondStore32Inv:
8181 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
8182 case SystemZ::CondStore64:
8183 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
8184 case SystemZ::CondStore64Inv:
8185 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
8186 case SystemZ::CondStoreF32:
8187 return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
8188 case SystemZ::CondStoreF32Inv:
8189 return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
8190 case SystemZ::CondStoreF64:
8191 return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
8192 case SystemZ::CondStoreF64Inv:
8193 return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
8194
8195 case SystemZ::PAIR128:
8196 return emitPair128(MI, MBB);
8197 case SystemZ::AEXT128:
8198 return emitExt128(MI, MBB, false);
8199 case SystemZ::ZEXT128:
8200 return emitExt128(MI, MBB, true);
8201
8202 case SystemZ::ATOMIC_SWAPW:
8203 return emitAtomicLoadBinary(MI, MBB, 0, 0);
8204 case SystemZ::ATOMIC_SWAP_32:
8205 return emitAtomicLoadBinary(MI, MBB, 0, 32);
8206 case SystemZ::ATOMIC_SWAP_64:
8207 return emitAtomicLoadBinary(MI, MBB, 0, 64);
8208
8209 case SystemZ::ATOMIC_LOADW_AR:
8210 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
8211 case SystemZ::ATOMIC_LOADW_AFI:
8212 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
8213 case SystemZ::ATOMIC_LOAD_AR:
8214 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
8215 case SystemZ::ATOMIC_LOAD_AHI:
8216 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
8217 case SystemZ::ATOMIC_LOAD_AFI:
8218 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
8219 case SystemZ::ATOMIC_LOAD_AGR:
8220 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
8221 case SystemZ::ATOMIC_LOAD_AGHI:
8222 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
8223 case SystemZ::ATOMIC_LOAD_AGFI:
8224 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
8225
8226 case SystemZ::ATOMIC_LOADW_SR:
8227 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
8228 case SystemZ::ATOMIC_LOAD_SR:
8229 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
8230 case SystemZ::ATOMIC_LOAD_SGR:
8231 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
8232
8233 case SystemZ::ATOMIC_LOADW_NR:
8234 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
8235 case SystemZ::ATOMIC_LOADW_NILH:
8236 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
8237 case SystemZ::ATOMIC_LOAD_NR:
8238 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
8239 case SystemZ::ATOMIC_LOAD_NILL:
8240 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
8241 case SystemZ::ATOMIC_LOAD_NILH:
8242 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
8243 case SystemZ::ATOMIC_LOAD_NILF:
8244 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
8245 case SystemZ::ATOMIC_LOAD_NGR:
8246 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
8247 case SystemZ::ATOMIC_LOAD_NILL64:
8248 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
8249 case SystemZ::ATOMIC_LOAD_NILH64:
8250 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
8251 case SystemZ::ATOMIC_LOAD_NIHL64:
8252 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
8253 case SystemZ::ATOMIC_LOAD_NIHH64:
8254 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
8255 case SystemZ::ATOMIC_LOAD_NILF64:
8256 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
8257 case SystemZ::ATOMIC_LOAD_NIHF64:
8258 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
8259
8260 case SystemZ::ATOMIC_LOADW_OR:
8261 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
8262 case SystemZ::ATOMIC_LOADW_OILH:
8263 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
8264 case SystemZ::ATOMIC_LOAD_OR:
8265 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
8266 case SystemZ::ATOMIC_LOAD_OILL:
8267 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
8268 case SystemZ::ATOMIC_LOAD_OILH:
8269 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
8270 case SystemZ::ATOMIC_LOAD_OILF:
8271 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
8272 case SystemZ::ATOMIC_LOAD_OGR:
8273 return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
8274 case SystemZ::ATOMIC_LOAD_OILL64:
8275 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
8276 case SystemZ::ATOMIC_LOAD_OILH64:
8277 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
8278 case SystemZ::ATOMIC_LOAD_OIHL64:
8279 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
8280 case SystemZ::ATOMIC_LOAD_OIHH64:
8281 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
8282 case SystemZ::ATOMIC_LOAD_OILF64:
8283 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
8284 case SystemZ::ATOMIC_LOAD_OIHF64:
8285 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
8286
8287 case SystemZ::ATOMIC_LOADW_XR:
8288 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
8289 case SystemZ::ATOMIC_LOADW_XILF:
8290 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
8291 case SystemZ::ATOMIC_LOAD_XR:
8292 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
8293 case SystemZ::ATOMIC_LOAD_XILF:
8294 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
8295 case SystemZ::ATOMIC_LOAD_XGR:
8296 return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
8297 case SystemZ::ATOMIC_LOAD_XILF64:
8298 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
8299 case SystemZ::ATOMIC_LOAD_XIHF64:
8300 return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
8301
8302 case SystemZ::ATOMIC_LOADW_NRi:
8303 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
8304 case SystemZ::ATOMIC_LOADW_NILHi:
8305 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
8306 case SystemZ::ATOMIC_LOAD_NRi:
8307 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
8308 case SystemZ::ATOMIC_LOAD_NILLi:
8309 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
8310 case SystemZ::ATOMIC_LOAD_NILHi:
8311 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
8312 case SystemZ::ATOMIC_LOAD_NILFi:
8313 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
8314 case SystemZ::ATOMIC_LOAD_NGRi:
8315 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
8316 case SystemZ::ATOMIC_LOAD_NILL64i:
8317 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
8318 case SystemZ::ATOMIC_LOAD_NILH64i:
8319 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
8320 case SystemZ::ATOMIC_LOAD_NIHL64i:
8321 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
8322 case SystemZ::ATOMIC_LOAD_NIHH64i:
8323 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
8324 case SystemZ::ATOMIC_LOAD_NILF64i:
8325 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
8326 case SystemZ::ATOMIC_LOAD_NIHF64i:
8327 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
8328
8329 case SystemZ::ATOMIC_LOADW_MIN:
8330 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
8331 SystemZ::CCMASK_CMP_LE, 0);
8332 case SystemZ::ATOMIC_LOAD_MIN_32:
8333 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
8334 SystemZ::CCMASK_CMP_LE, 32);
8335 case SystemZ::ATOMIC_LOAD_MIN_64:
8336 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
8337 SystemZ::CCMASK_CMP_LE, 64);
8338
8339 case SystemZ::ATOMIC_LOADW_MAX:
8340 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
8341 SystemZ::CCMASK_CMP_GE, 0);
8342 case SystemZ::ATOMIC_LOAD_MAX_32:
8343 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
8344 SystemZ::CCMASK_CMP_GE, 32);
8345 case SystemZ::ATOMIC_LOAD_MAX_64:
8346 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
8347 SystemZ::CCMASK_CMP_GE, 64);
8348
8349 case SystemZ::ATOMIC_LOADW_UMIN:
8350 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
8351 SystemZ::CCMASK_CMP_LE, 0);
8352 case SystemZ::ATOMIC_LOAD_UMIN_32:
8353 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
8354 SystemZ::CCMASK_CMP_LE, 32);
8355 case SystemZ::ATOMIC_LOAD_UMIN_64:
8356 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
8357 SystemZ::CCMASK_CMP_LE, 64);
8358
8359 case SystemZ::ATOMIC_LOADW_UMAX:
8360 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
8361 SystemZ::CCMASK_CMP_GE, 0);
8362 case SystemZ::ATOMIC_LOAD_UMAX_32:
8363 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
8364 SystemZ::CCMASK_CMP_GE, 32);
8365 case SystemZ::ATOMIC_LOAD_UMAX_64:
8366 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
8367 SystemZ::CCMASK_CMP_GE, 64);
8368
8369 case SystemZ::ATOMIC_CMP_SWAPW:
8370 return emitAtomicCmpSwapW(MI, MBB);
8371 case SystemZ::MVCSequence:
8372 case SystemZ::MVCLoop:
8373 return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
8374 case SystemZ::NCSequence:
8375 case SystemZ::NCLoop:
8376 return emitMemMemWrapper(MI, MBB, SystemZ::NC);
8377 case SystemZ::OCSequence:
8378 case SystemZ::OCLoop:
8379 return emitMemMemWrapper(MI, MBB, SystemZ::OC);
8380 case SystemZ::XCSequence:
8381 case SystemZ::XCLoop:
8382 return emitMemMemWrapper(MI, MBB, SystemZ::XC);
8383 case SystemZ::CLCSequence:
8384 case SystemZ::CLCLoop:
8385 return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
8386 case SystemZ::CLSTLoop:
8387 return emitStringWrapper(MI, MBB, SystemZ::CLST);
8388 case SystemZ::MVSTLoop:
8389 return emitStringWrapper(MI, MBB, SystemZ::MVST);
8390 case SystemZ::SRSTLoop:
8391 return emitStringWrapper(MI, MBB, SystemZ::SRST);
8392 case SystemZ::TBEGIN:
8393 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
8394 case SystemZ::TBEGIN_nofloat:
8395 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
8396 case SystemZ::TBEGINC:
8397 return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
8398 case SystemZ::LTEBRCompare_VecPseudo:
8399 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
8400 case SystemZ::LTDBRCompare_VecPseudo:
8401 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
8402 case SystemZ::LTXBRCompare_VecPseudo:
8403 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);
8404
8405 case SystemZ::PROBED_ALLOCA:
8406 return emitProbedAlloca(MI, MBB);
8407
8408 case TargetOpcode::STACKMAP:
8409 case TargetOpcode::PATCHPOINT:
8410 return emitPatchPoint(MI, MBB);
8411
8412 default:
8413 llvm_unreachable("Unexpected instr type to insert");
8414 }
8415 }
8416
8417 // This is only used by the isel schedulers, and is needed only to prevent
8418 // the compiler from crashing when list-ilp is used.
8419 const TargetRegisterClass *
8420 SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
8421 if (VT == MVT::Untyped)
8422 return &SystemZ::ADDR128BitRegClass;
8423 return TargetLowering::getRepRegClassFor(VT);
8424 }
8425