//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In, SDValue ChainIn)
      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // Chain if this is a strict floating-point comparison.
  SDValue Chain;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value. Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));

  // Set up the register classes.
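  // (With the high-word facility, the upper 32 bits of each 64-bit GPR are
  // separately usable, so GRX32 lets the allocator place an i32 in either
  // half of a 64-bit register; without it, only the low halves in GR32 are
  // available.)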
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (!useSoftFloat()) {
    if (Subtarget.hasVector()) {
      addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
      addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
    } else {
      addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
      addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
    }
    if (Subtarget.hasVectorEnhancements1())
      addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
    else
      addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

    if (Subtarget.hasVector()) {
      addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
    }
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(Align(2));
  // For performance reasons we prefer 16-byte alignment.
  setPrefFunctionAlignment(Align(16));

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      setOperationAction(ISD::ABS, VT, Legal);

      // Expand individual DIV and REMs into DIVREMs.
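      // (The divide instructions, e.g. DSGR, compute the quotient and the
      // remainder together in an even/odd register pair, so a single DIVREM
      // covers any mix of DIV and REM uses of the same operands.)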
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Support addition/subtraction with overflow.
      setOperationAction(ISD::SADDO, VT, Custom);
      setOperationAction(ISD::SSUBO, VT, Custom);

      // Support addition/subtraction with carry.
      setOperationAction(ISD::UADDO, VT, Custom);
      setOperationAction(ISD::USUBO, VT, Custom);

      // Support carry in as value rather than glue.
      setOperationAction(ISD::ADDCARRY, VT, Custom);
      setOperationAction(ISD::SUBCARRY, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);

      // Mirror those settings for STRICT_FP_TO_[SU]INT. Note that these all
      // default to Expand, so need to be modified to Legal where appropriate.
      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Legal);
      if (Subtarget.hasFPExtension())
        setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Legal);

      // And similarly for STRICT_[SU]INT_TO_FP.
      setOperationAction(ISD::STRICT_SINT_TO_FP, VT, Legal);
      if (Subtarget.hasFPExtension())
        setOperationAction(ISD::STRICT_UINT_TO_FP, VT, Legal);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
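  // (The custom lowering turns each of these into a compare-and-swap loop
  // on the containing aligned 4-byte word, using the memory VT kept in the
  // MachineMemOperand to derive the required rotate and mask amounts.)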
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);

  // Even though i128 is not a legal type, we still need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // On z15 we have native support for a 64-bit CTPOP.
  if (Subtarget.hasMiscellaneousExtensions3()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Promote);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  }

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // Expand 128 bit shifts without using a libcall.
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
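  // (Most of these lower to PC-relative LARL-based sequences or GOT loads;
  // TLS additionally needs __tls_get_offset calls in some access models.)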
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such. In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::ABS, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner. ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f64, Legal);
  }

  if (Subtarget.hasVectorEnhancements2()) {
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4f32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4f32, Legal);

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f32, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
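      // (These FI variants take an explicit rounding-mode modifier, which
      // is how floor, ceiling, truncate and round are selected.)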
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);

      // Handle constrained floating-point operations.
      setOperationAction(ISD::STRICT_FADD, VT, Legal);
      setOperationAction(ISD::STRICT_FSUB, VT, Legal);
      setOperationAction(ISD::STRICT_FMUL, VT, Legal);
      setOperationAction(ISD::STRICT_FDIV, VT, Legal);
      setOperationAction(ISD::STRICT_FMA, VT, Legal);
      setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
      setOperationAction(ISD::STRICT_FRINT, VT, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
        setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
        setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
        setOperationAction(ISD::STRICT_FROUND, VT, Legal);
        setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
      }
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
    for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                     MVT::v4f32, MVT::v2f64 }) {
      setOperationAction(ISD::STRICT_FMAXNUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMAXIMUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMINIMUM, VT, Legal);
    }
  }

  // We only have fused f128 multiply-addition on vector registers.
  if (!Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FMA, MVT::f128, Expand);
    setOperationAction(ISD::STRICT_FMA, MVT::f128, Expand);
  }

  // We don't have a copysign instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have an extending load instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::STRICT_FP_ROUND);
  setTargetDAGCombine(ISD::FP_EXTEND);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::STRICT_FP_EXTEND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SDIV);
  setTargetDAGCombine(ISD::UDIV);
  setTargetDAGCombine(ISD::SREM);
  setTargetDAGCombine(ISD::UREM);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC". Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;

  // Default to having -disable-strictnode-mutation on
  IsStrictFPEnabled = true;
}

bool SystemZTargetLowering::useSoftFloat() const {
  return Subtarget.hasSoftFloat();
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(
    const MachineFunction &MF, EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

// Return true if the constant can be generated with a vector instruction,
// such as VGM, VGMB or VREPI.
bool SystemZVectorConstantInfo::isVectorConstantLegal(
    const SystemZSubtarget &Subtarget) {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
    return false;

  // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
  // preferred way of creating all-zero and all-one vectors so give it
  // priority over other methods below.
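  // Each bit of the 16-bit mask selects whether the corresponding byte of
  // the result is 0xff or 0x00; e.g. a v4i32 splat of 0x0000ffff yields
  // Mask == 0x3333 under the byte numbering used in the loop below.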
  unsigned Mask = 0;
  unsigned I = 0;
  for (; I < SystemZ::VectorBytes; ++I) {
    uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
    if (Byte == 0xff)
      Mask |= 1ULL << I;
    else if (Byte != 0)
      break;
  }
  if (I == SystemZ::VectorBytes) {
    Opcode = SystemZISD::BYTE_MASK;
    OpVals.push_back(Mask);
    VecVT = MVT::getVectorVT(MVT::getIntegerVT(8), 16);
    return true;
  }

  if (SplatBitSize > 64)
    return false;

  auto tryValue = [&](uint64_t Value) -> bool {
    // Try VECTOR REPLICATE IMMEDIATE
    int64_t SignedValue = SignExtend64(Value, SplatBitSize);
    if (isInt<16>(SignedValue)) {
      OpVals.push_back(((unsigned) SignedValue));
      Opcode = SystemZISD::REPLICATE;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    // Try VECTOR GENERATE MASK
    unsigned Start, End;
    if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
      // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
      // denoting 1 << 63 and 63 denoting 1. Convert them to bit numbers for
      // a SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
      OpVals.push_back(Start - (64 - SplatBitSize));
      OpVals.push_back(End - (64 - SplatBitSize));
      Opcode = SystemZISD::ROTATE_MASK;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    return false;
  };

  // First try assuming that any undefined bits above the highest set bit
  // and below the lowest set bit are 1s. This increases the likelihood of
  // being able to use a sign-extended element value in VECTOR REPLICATE
  // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
  uint64_t SplatBitsZ = SplatBits.getZExtValue();
  uint64_t SplatUndefZ = SplatUndef.getZExtValue();
  uint64_t Lower =
      (SplatUndefZ & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
  uint64_t Upper =
      (SplatUndefZ & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
  if (tryValue(SplatBitsZ | Upper | Lower))
    return true;

  // Now try assuming that any undefined bits between the first and
  // last defined set bits are set. This increases the chances of
  // using a non-wraparound mask.
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(APFloat FPImm) {
  IntBits = FPImm.bitcastToAPInt().zextOrSelf(128);
  isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
  SplatBits = FPImm.bitcastToAPInt();
  unsigned Width = SplatBits.getBitWidth();
  IntBits <<= (SystemZ::VectorBits - Width);

  // Find the smallest splat.
  while (Width > 8) {
    unsigned HalfSize = Width / 2;
    APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatBits.trunc(HalfSize);

    // If the two halves do not match, stop here.
    if (HighValue != LowValue || 8 > HalfSize)
      break;

    SplatBits = HighValue;
    Width = HalfSize;
  }
  SplatUndef = 0;
  SplatBitSize = Width;
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(BuildVectorSDNode *BVN) {
  assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
  bool HasAnyUndefs;

  // Get IntBits by finding the 128 bit splat.
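  // (The trailing 'true' selects big-endian splat detection, matching
  // SystemZ's byte order.)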
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
                       true);

  // Get SplatBits by finding the 8 bit or greater splat.
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                       true);
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool ForCodeSize) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  if (Imm.isZero() || Imm.isNegZero())
    return true;

  return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
}

/// Returns true if stack probing through inline assembly is requested.
bool SystemZTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";
  return false;
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};

// Return the desired addressing mode for a Load which has only one use (in
// the same block) which is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support a Load->Store combination may be combined to either
  // an MVC or vector operations and it seems to work best to allow the
  // vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
            // Comparison of memory with 16 bit signed / unsigned immediate
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        // Load->Store
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        // Load->Store
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE type
    // instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined into a
    // VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  AddressingMode SupportedAM(true, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;
  else
    // Indexing is OK but no scale factor can be applied.
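    // (A Scale of 1 simply means plain base + index addressing, with no
    // multiplication applied to the index.)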
    return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedSize();
  unsigned ToBits = ToType->getPrimitiveSizeInBits().getFixedSize();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getFixedSizeInBits();
  unsigned ToBits = ToVT.getFixedSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
    case 'v': // Vector register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Immediate;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'v': // Vector register
    if ((type->isVectorTy() || type->isFloatingPointTy()) &&
        Subtarget.hasVector())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified. RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
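// For example, "{r4}" parsed against GR64Regs yields SystemZ::R4D.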
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < Size && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (!useSoftFloat()) {
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::FP64BitRegClass);
        else if (VT == MVT::f128)
          return std::make_pair(0U, &SystemZ::FP128BitRegClass);
        return std::make_pair(0U, &SystemZ::FP32BitRegClass);
      }
      break;
    case 'v': // Vector register
      if (Subtarget.hasVector()) {
        if (VT == MVT::f32)
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
      }
      break;
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT. The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs, 16);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs, 16);
    }
    if (Constraint[1] == 'f') {
      if (useSoftFloat())
        return std::make_pair(
            0u, static_cast<const TargetRegisterClass *>(nullptr));
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs, 16);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs, 16);
    }
    if (Constraint[1] == 'v') {
      if (!Subtarget.hasVector())
        return std::make_pair(
            0u, static_cast<const TargetRegisterClass *>(nullptr));
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
                                   SystemZMC::VR32Regs, 32);
      if (VT == MVT::f64)
        return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
                                   SystemZMC::VR64Regs, 32);
      return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
                                 SystemZMC::VR128Regs, 32);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SystemZTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                                  const MachineFunction &MF) const {

  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r15", SystemZ::R15D)
                     .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
    CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                           SystemZ::R14D, 0 };
  return ScratchRegs;
}

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types. If the user
// attempts to use such a type as a function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getValVT(). The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
  SDLoc DL(In);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
                           DAG.getIntPtrConstant(0, DL));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
                           DAG.getIntPtrConstant(1, DL));
  SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
                                    MVT::Untyped, Hi, Lo);
  return SDValue(Pair, 0);
}

static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {
  SDLoc DL(In);
  SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64,
                                          DL, MVT::i64, In);
  SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64,
                                          DL, MVT::i64, In);
  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi);
}

bool SystemZTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  EVT ValueVT = Val.getValueType();
  assert((ValueVT != MVT::i128 ||
          ((NumParts == 1 && PartVT == MVT::Untyped) ||
           (NumParts == 2 && PartVT == MVT::i64))) &&
         "Unknown handling of i128 value.");
  if (ValueVT == MVT::i128 && NumParts == 1) {
    // Inline assembly operand.
    Parts[0] = lowerI128ToGR128(DAG, Val);
    return true;
  }
  return false;
}

SDValue SystemZTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  assert((ValueVT != MVT::i128 ||
          ((NumParts == 1 && PartVT == MVT::Untyped) ||
           (NumParts == 2 && PartVT == MVT::i64))) &&
         "Unknown handling of i128 value.");
  if (ValueVT == MVT::i128 && NumParts == 1)
    // Inline assembly operand.
    return lowerGR128ToI128(DAG, Parts[0]);
  return SDValue();
}

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      Register VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter. Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
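      // (On this big-endian target a right-justified i32 or f32 occupies
      // the high-addressed 4 bytes of its 8-byte slot, hence the offset of
      // 4 added below.)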
1494 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1495 if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32) 1496 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, 1497 DAG.getIntPtrConstant(4, DL)); 1498 ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN, 1499 MachinePointerInfo::getFixedStack(MF, FI)); 1500 } 1501 1502 // Convert the value of the argument register into the value that's 1503 // being passed. 1504 if (VA.getLocInfo() == CCValAssign::Indirect) { 1505 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, 1506 MachinePointerInfo())); 1507 // If the original argument was split (e.g. i128), we need 1508 // to load all parts of it here (using the same address). 1509 unsigned ArgIndex = Ins[I].OrigArgIndex; 1510 assert (Ins[I].PartOffset == 0); 1511 while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) { 1512 CCValAssign &PartVA = ArgLocs[I + 1]; 1513 unsigned PartOffset = Ins[I + 1].PartOffset; 1514 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, 1515 DAG.getIntPtrConstant(PartOffset, DL)); 1516 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address, 1517 MachinePointerInfo())); 1518 ++I; 1519 } 1520 } else 1521 InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue)); 1522 } 1523 1524 if (IsVarArg) { 1525 // Save the number of non-varargs registers for later use by va_start, etc. 1526 FuncInfo->setVarArgsFirstGPR(NumFixedGPRs); 1527 FuncInfo->setVarArgsFirstFPR(NumFixedFPRs); 1528 1529 // Likewise the address (in the form of a frame index) of where the 1530 // first stack vararg would be. The 1-byte size here is arbitrary. 1531 int64_t StackSize = CCInfo.getNextStackOffset(); 1532 FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true)); 1533 1534 // ...and a similar frame index for the caller-allocated save area 1535 // that will be used to store the incoming registers. 1536 int64_t RegSaveOffset = 1537 -SystemZMC::ELFCallFrameSize + TFL->getRegSpillOffset(MF, SystemZ::R2D) - 16; 1538 unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true); 1539 FuncInfo->setRegSaveFrameIndex(RegSaveIndex); 1540 1541 // Store the FPR varargs in the reserved frame slots. (We store the 1542 // GPRs as part of the prologue.) 1543 if (NumFixedFPRs < SystemZ::ELFNumArgFPRs && !useSoftFloat()) { 1544 SDValue MemOps[SystemZ::ELFNumArgFPRs]; 1545 for (unsigned I = NumFixedFPRs; I < SystemZ::ELFNumArgFPRs; ++I) { 1546 unsigned Offset = TFL->getRegSpillOffset(MF, SystemZ::ELFArgFPRs[I]); 1547 int FI = 1548 MFI.CreateFixedObject(8, -SystemZMC::ELFCallFrameSize + Offset, true); 1549 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 1550 unsigned VReg = MF.addLiveIn(SystemZ::ELFArgFPRs[I], 1551 &SystemZ::FP64BitRegClass); 1552 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64); 1553 MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN, 1554 MachinePointerInfo::getFixedStack(MF, FI)); 1555 } 1556 // Join the stores, which are independent of one another. 
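// A TokenFactor orders each store after the incoming chain while imposing
// no ordering among the stores themselves.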
1557 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 1558 makeArrayRef(&MemOps[NumFixedFPRs], 1559 SystemZ::ELFNumArgFPRs-NumFixedFPRs)); 1560 } 1561 } 1562 1563 return Chain; 1564 } 1565 1566 static bool canUseSiblingCall(const CCState &ArgCCInfo, 1567 SmallVectorImpl<CCValAssign> &ArgLocs, 1568 SmallVectorImpl<ISD::OutputArg> &Outs) { 1569 // Punt if there are any indirect or stack arguments, or if the call 1570 // needs the callee-saved argument register R6, or if the call uses 1571 // the callee-saved register arguments SwiftSelf and SwiftError. 1572 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { 1573 CCValAssign &VA = ArgLocs[I]; 1574 if (VA.getLocInfo() == CCValAssign::Indirect) 1575 return false; 1576 if (!VA.isRegLoc()) 1577 return false; 1578 Register Reg = VA.getLocReg(); 1579 if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D) 1580 return false; 1581 if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError()) 1582 return false; 1583 } 1584 return true; 1585 } 1586 1587 SDValue 1588 SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI, 1589 SmallVectorImpl<SDValue> &InVals) const { 1590 SelectionDAG &DAG = CLI.DAG; 1591 SDLoc &DL = CLI.DL; 1592 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 1593 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 1594 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 1595 SDValue Chain = CLI.Chain; 1596 SDValue Callee = CLI.Callee; 1597 bool &IsTailCall = CLI.IsTailCall; 1598 CallingConv::ID CallConv = CLI.CallConv; 1599 bool IsVarArg = CLI.IsVarArg; 1600 MachineFunction &MF = DAG.getMachineFunction(); 1601 EVT PtrVT = getPointerTy(MF.getDataLayout()); 1602 LLVMContext &Ctx = *DAG.getContext(); 1603 1604 // Detect unsupported vector argument and return types. 1605 if (Subtarget.hasVector()) { 1606 VerifyVectorTypes(Outs); 1607 VerifyVectorTypes(Ins); 1608 } 1609 1610 // Analyze the operands of the call, assigning locations to each operand. 1611 SmallVector<CCValAssign, 16> ArgLocs; 1612 SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, Ctx); 1613 ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ); 1614 1615 // We don't support GuaranteedTailCallOpt, only automatically-detected 1616 // sibling calls. 1617 if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs)) 1618 IsTailCall = false; 1619 1620 // Get a count of how many bytes are to be pushed on the stack. 1621 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 1622 1623 // Mark the start of the call. 1624 if (!IsTailCall) 1625 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL); 1626 1627 // Copy argument values to their designated locations. 1628 SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass; 1629 SmallVector<SDValue, 8> MemOpChains; 1630 SDValue StackPtr; 1631 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { 1632 CCValAssign &VA = ArgLocs[I]; 1633 SDValue ArgValue = OutVals[I]; 1634 1635 if (VA.getLocInfo() == CCValAssign::Indirect) { 1636 // Store the argument in a stack slot and pass its address. 1637 unsigned ArgIndex = Outs[I].OrigArgIndex; 1638 EVT SlotVT; 1639 if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) { 1640 // Allocate the full stack space for a promoted (and split) argument. 
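// For example, an i128 argument split into two i64 parts gets a single
// 16-byte slot (SlotVT == i128) here rather than one slot per part.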
1641 Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty; 1642 EVT OrigArgVT = getValueType(MF.getDataLayout(), OrigArgType); 1643 MVT PartVT = getRegisterTypeForCallingConv(Ctx, CLI.CallConv, OrigArgVT); 1644 unsigned N = getNumRegistersForCallingConv(Ctx, CLI.CallConv, OrigArgVT); 1645 SlotVT = EVT::getIntegerVT(Ctx, PartVT.getSizeInBits() * N); 1646 } else { 1647 SlotVT = Outs[I].ArgVT; 1648 } 1649 SDValue SpillSlot = DAG.CreateStackTemporary(SlotVT); 1650 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 1651 MemOpChains.push_back( 1652 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 1653 MachinePointerInfo::getFixedStack(MF, FI))); 1654 // If the original argument was split (e.g. i128), we need 1655 // to store all parts of it here (and pass just one address). 1656 assert (Outs[I].PartOffset == 0); 1657 while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) { 1658 SDValue PartValue = OutVals[I + 1]; 1659 unsigned PartOffset = Outs[I + 1].PartOffset; 1660 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, 1661 DAG.getIntPtrConstant(PartOffset, DL)); 1662 MemOpChains.push_back( 1663 DAG.getStore(Chain, DL, PartValue, Address, 1664 MachinePointerInfo::getFixedStack(MF, FI))); 1665 assert((PartOffset + PartValue.getValueType().getStoreSize() <= 1666 SlotVT.getStoreSize()) && "Not enough space for argument part!"); 1667 ++I; 1668 } 1669 ArgValue = SpillSlot; 1670 } else 1671 ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue); 1672 1673 if (VA.isRegLoc()) 1674 // Queue up the argument copies and emit them at the end. 1675 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); 1676 else { 1677 assert(VA.isMemLoc() && "Argument not register or memory"); 1678 1679 // Work out the address of the stack slot. Unpromoted ints and 1680 // floats are passed as right-justified 8-byte values. 1681 if (!StackPtr.getNode()) 1682 StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT); 1683 unsigned Offset = SystemZMC::ELFCallFrameSize + VA.getLocMemOffset(); 1684 if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32) 1685 Offset += 4; 1686 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, 1687 DAG.getIntPtrConstant(Offset, DL)); 1688 1689 // Emit the store. 1690 MemOpChains.push_back( 1691 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); 1692 } 1693 } 1694 1695 // Join the stores, which are independent of one another. 1696 if (!MemOpChains.empty()) 1697 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 1698 1699 // Accept direct calls by converting symbolic call addresses to the 1700 // associated Target* opcodes. Force %r1 to be used for indirect 1701 // tail calls. 1702 SDValue Glue; 1703 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1704 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT); 1705 Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); 1706 } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1707 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT); 1708 Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); 1709 } else if (IsTailCall) { 1710 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue); 1711 Glue = Chain.getValue(1); 1712 Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType()); 1713 } 1714 1715 // Build a sequence of copy-to-reg nodes, chained and glued together. 
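// The glue keeps these copies immediately before the call, so nothing can
// be scheduled in between that might clobber the argument registers.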
1716 for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) { 1717 Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first, 1718 RegsToPass[I].second, Glue); 1719 Glue = Chain.getValue(1); 1720 } 1721 1722 // The first call operand is the chain and the second is the target address. 1723 SmallVector<SDValue, 8> Ops; 1724 Ops.push_back(Chain); 1725 Ops.push_back(Callee); 1726 1727 // Add argument registers to the end of the list so that they are 1728 // known live into the call. 1729 for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) 1730 Ops.push_back(DAG.getRegister(RegsToPass[I].first, 1731 RegsToPass[I].second.getValueType())); 1732 1733 // Add a register mask operand representing the call-preserved registers. 1734 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 1735 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 1736 assert(Mask && "Missing call preserved mask for calling convention"); 1737 Ops.push_back(DAG.getRegisterMask(Mask)); 1738 1739 // Glue the call to the argument copies, if any. 1740 if (Glue.getNode()) 1741 Ops.push_back(Glue); 1742 1743 // Emit the call. 1744 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1745 if (IsTailCall) 1746 return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops); 1747 Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops); 1748 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); 1749 Glue = Chain.getValue(1); 1750 1751 // Mark the end of the call, which is glued to the call itself. 1752 Chain = DAG.getCALLSEQ_END(Chain, 1753 DAG.getConstant(NumBytes, DL, PtrVT, true), 1754 DAG.getConstant(0, DL, PtrVT, true), 1755 Glue, DL); 1756 Glue = Chain.getValue(1); 1757 1758 // Assign locations to each value returned by this call. 1759 SmallVector<CCValAssign, 16> RetLocs; 1760 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx); 1761 RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ); 1762 1763 // Copy all of the result registers out of their specified physreg. 1764 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { 1765 CCValAssign &VA = RetLocs[I]; 1766 1767 // Copy the value out, gluing the copy to the end of the call sequence. 1768 SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), 1769 VA.getLocVT(), Glue); 1770 Chain = RetValue.getValue(1); 1771 Glue = RetValue.getValue(2); 1772 1773 // Convert the value of the return register into the value that's 1774 // being returned. 1775 InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue)); 1776 } 1777 1778 return Chain; 1779 } 1780 1781 bool SystemZTargetLowering:: 1782 CanLowerReturn(CallingConv::ID CallConv, 1783 MachineFunction &MF, bool isVarArg, 1784 const SmallVectorImpl<ISD::OutputArg> &Outs, 1785 LLVMContext &Context) const { 1786 // Detect unsupported vector return types. 1787 if (Subtarget.hasVector()) 1788 VerifyVectorTypes(Outs); 1789 1790 // Special case that we cannot easily detect in RetCC_SystemZ since 1791 // i128 is not a legal type. 
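// Returning false here makes the common lowering code demote such a return
// value to an sret-style hidden pointer argument instead.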
1792 for (auto &Out : Outs) 1793 if (Out.ArgVT == MVT::i128) 1794 return false; 1795 1796 SmallVector<CCValAssign, 16> RetLocs; 1797 CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context); 1798 return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ); 1799 } 1800 1801 SDValue 1802 SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 1803 bool IsVarArg, 1804 const SmallVectorImpl<ISD::OutputArg> &Outs, 1805 const SmallVectorImpl<SDValue> &OutVals, 1806 const SDLoc &DL, SelectionDAG &DAG) const { 1807 MachineFunction &MF = DAG.getMachineFunction(); 1808 1809 // Detect unsupported vector return types. 1810 if (Subtarget.hasVector()) 1811 VerifyVectorTypes(Outs); 1812 1813 // Assign locations to each returned value. 1814 SmallVector<CCValAssign, 16> RetLocs; 1815 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); 1816 RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ); 1817 1818 // Quick exit for void returns 1819 if (RetLocs.empty()) 1820 return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain); 1821 1822 if (CallConv == CallingConv::GHC) 1823 report_fatal_error("GHC functions return void only"); 1824 1825 // Copy the result values into the output registers. 1826 SDValue Glue; 1827 SmallVector<SDValue, 4> RetOps; 1828 RetOps.push_back(Chain); 1829 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { 1830 CCValAssign &VA = RetLocs[I]; 1831 SDValue RetValue = OutVals[I]; 1832 1833 // Make the return register live on exit. 1834 assert(VA.isRegLoc() && "Can only return in registers!"); 1835 1836 // Promote the value as required. 1837 RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue); 1838 1839 // Chain and glue the copies together. 1840 Register Reg = VA.getLocReg(); 1841 Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue); 1842 Glue = Chain.getValue(1); 1843 RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT())); 1844 } 1845 1846 // Update chain and glue. 1847 RetOps[0] = Chain; 1848 if (Glue.getNode()) 1849 RetOps.push_back(Glue); 1850 1851 return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps); 1852 } 1853 1854 // Return true if Op is an intrinsic node with chain that returns the CC value 1855 // as its only (other) argument. Provide the associated SystemZISD opcode and 1856 // the mask of valid CC values if so. 1857 static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, 1858 unsigned &CCValid) { 1859 unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 1860 switch (Id) { 1861 case Intrinsic::s390_tbegin: 1862 Opcode = SystemZISD::TBEGIN; 1863 CCValid = SystemZ::CCMASK_TBEGIN; 1864 return true; 1865 1866 case Intrinsic::s390_tbegin_nofloat: 1867 Opcode = SystemZISD::TBEGIN_NOFLOAT; 1868 CCValid = SystemZ::CCMASK_TBEGIN; 1869 return true; 1870 1871 case Intrinsic::s390_tend: 1872 Opcode = SystemZISD::TEND; 1873 CCValid = SystemZ::CCMASK_TEND; 1874 return true; 1875 1876 default: 1877 return false; 1878 } 1879 } 1880 1881 // Return true if Op is an intrinsic node without chain that returns the 1882 // CC value as its final argument. Provide the associated SystemZISD 1883 // opcode and the mask of valid CC values if so. 
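// For example, the vector-compare intrinsics handled below can only
// produce CC values inside CCMASK_VCMP (0 = all elements true, 1 = mixed,
// 3 = none true).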
1884 static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) { 1885 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1886 switch (Id) { 1887 case Intrinsic::s390_vpkshs: 1888 case Intrinsic::s390_vpksfs: 1889 case Intrinsic::s390_vpksgs: 1890 Opcode = SystemZISD::PACKS_CC; 1891 CCValid = SystemZ::CCMASK_VCMP; 1892 return true; 1893 1894 case Intrinsic::s390_vpklshs: 1895 case Intrinsic::s390_vpklsfs: 1896 case Intrinsic::s390_vpklsgs: 1897 Opcode = SystemZISD::PACKLS_CC; 1898 CCValid = SystemZ::CCMASK_VCMP; 1899 return true; 1900 1901 case Intrinsic::s390_vceqbs: 1902 case Intrinsic::s390_vceqhs: 1903 case Intrinsic::s390_vceqfs: 1904 case Intrinsic::s390_vceqgs: 1905 Opcode = SystemZISD::VICMPES; 1906 CCValid = SystemZ::CCMASK_VCMP; 1907 return true; 1908 1909 case Intrinsic::s390_vchbs: 1910 case Intrinsic::s390_vchhs: 1911 case Intrinsic::s390_vchfs: 1912 case Intrinsic::s390_vchgs: 1913 Opcode = SystemZISD::VICMPHS; 1914 CCValid = SystemZ::CCMASK_VCMP; 1915 return true; 1916 1917 case Intrinsic::s390_vchlbs: 1918 case Intrinsic::s390_vchlhs: 1919 case Intrinsic::s390_vchlfs: 1920 case Intrinsic::s390_vchlgs: 1921 Opcode = SystemZISD::VICMPHLS; 1922 CCValid = SystemZ::CCMASK_VCMP; 1923 return true; 1924 1925 case Intrinsic::s390_vtm: 1926 Opcode = SystemZISD::VTM; 1927 CCValid = SystemZ::CCMASK_VCMP; 1928 return true; 1929 1930 case Intrinsic::s390_vfaebs: 1931 case Intrinsic::s390_vfaehs: 1932 case Intrinsic::s390_vfaefs: 1933 Opcode = SystemZISD::VFAE_CC; 1934 CCValid = SystemZ::CCMASK_ANY; 1935 return true; 1936 1937 case Intrinsic::s390_vfaezbs: 1938 case Intrinsic::s390_vfaezhs: 1939 case Intrinsic::s390_vfaezfs: 1940 Opcode = SystemZISD::VFAEZ_CC; 1941 CCValid = SystemZ::CCMASK_ANY; 1942 return true; 1943 1944 case Intrinsic::s390_vfeebs: 1945 case Intrinsic::s390_vfeehs: 1946 case Intrinsic::s390_vfeefs: 1947 Opcode = SystemZISD::VFEE_CC; 1948 CCValid = SystemZ::CCMASK_ANY; 1949 return true; 1950 1951 case Intrinsic::s390_vfeezbs: 1952 case Intrinsic::s390_vfeezhs: 1953 case Intrinsic::s390_vfeezfs: 1954 Opcode = SystemZISD::VFEEZ_CC; 1955 CCValid = SystemZ::CCMASK_ANY; 1956 return true; 1957 1958 case Intrinsic::s390_vfenebs: 1959 case Intrinsic::s390_vfenehs: 1960 case Intrinsic::s390_vfenefs: 1961 Opcode = SystemZISD::VFENE_CC; 1962 CCValid = SystemZ::CCMASK_ANY; 1963 return true; 1964 1965 case Intrinsic::s390_vfenezbs: 1966 case Intrinsic::s390_vfenezhs: 1967 case Intrinsic::s390_vfenezfs: 1968 Opcode = SystemZISD::VFENEZ_CC; 1969 CCValid = SystemZ::CCMASK_ANY; 1970 return true; 1971 1972 case Intrinsic::s390_vistrbs: 1973 case Intrinsic::s390_vistrhs: 1974 case Intrinsic::s390_vistrfs: 1975 Opcode = SystemZISD::VISTR_CC; 1976 CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3; 1977 return true; 1978 1979 case Intrinsic::s390_vstrcbs: 1980 case Intrinsic::s390_vstrchs: 1981 case Intrinsic::s390_vstrcfs: 1982 Opcode = SystemZISD::VSTRC_CC; 1983 CCValid = SystemZ::CCMASK_ANY; 1984 return true; 1985 1986 case Intrinsic::s390_vstrczbs: 1987 case Intrinsic::s390_vstrczhs: 1988 case Intrinsic::s390_vstrczfs: 1989 Opcode = SystemZISD::VSTRCZ_CC; 1990 CCValid = SystemZ::CCMASK_ANY; 1991 return true; 1992 1993 case Intrinsic::s390_vstrsb: 1994 case Intrinsic::s390_vstrsh: 1995 case Intrinsic::s390_vstrsf: 1996 Opcode = SystemZISD::VSTRS_CC; 1997 CCValid = SystemZ::CCMASK_ANY; 1998 return true; 1999 2000 case Intrinsic::s390_vstrszb: 2001 case Intrinsic::s390_vstrszh: 2002 case Intrinsic::s390_vstrszf: 2003 Opcode = SystemZISD::VSTRSZ_CC; 2004 
CCValid = SystemZ::CCMASK_ANY; 2005 return true; 2006 2007 case Intrinsic::s390_vfcedbs: 2008 case Intrinsic::s390_vfcesbs: 2009 Opcode = SystemZISD::VFCMPES; 2010 CCValid = SystemZ::CCMASK_VCMP; 2011 return true; 2012 2013 case Intrinsic::s390_vfchdbs: 2014 case Intrinsic::s390_vfchsbs: 2015 Opcode = SystemZISD::VFCMPHS; 2016 CCValid = SystemZ::CCMASK_VCMP; 2017 return true; 2018 2019 case Intrinsic::s390_vfchedbs: 2020 case Intrinsic::s390_vfchesbs: 2021 Opcode = SystemZISD::VFCMPHES; 2022 CCValid = SystemZ::CCMASK_VCMP; 2023 return true; 2024 2025 case Intrinsic::s390_vftcidb: 2026 case Intrinsic::s390_vftcisb: 2027 Opcode = SystemZISD::VFTCI; 2028 CCValid = SystemZ::CCMASK_VCMP; 2029 return true; 2030 2031 case Intrinsic::s390_tdc: 2032 Opcode = SystemZISD::TDC; 2033 CCValid = SystemZ::CCMASK_TDC; 2034 return true; 2035 2036 default: 2037 return false; 2038 } 2039 } 2040 2041 // Emit an intrinsic with chain and an explicit CC register result. 2042 static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, 2043 unsigned Opcode) { 2044 // Copy all operands except the intrinsic ID. 2045 unsigned NumOps = Op.getNumOperands(); 2046 SmallVector<SDValue, 6> Ops; 2047 Ops.reserve(NumOps - 1); 2048 Ops.push_back(Op.getOperand(0)); 2049 for (unsigned I = 2; I < NumOps; ++I) 2050 Ops.push_back(Op.getOperand(I)); 2051 2052 assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); 2053 SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other); 2054 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops); 2055 SDValue OldChain = SDValue(Op.getNode(), 1); 2056 SDValue NewChain = SDValue(Intr.getNode(), 1); 2057 DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain); 2058 return Intr.getNode(); 2059 } 2060 2061 // Emit an intrinsic with an explicit CC register result. 2062 static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, 2063 unsigned Opcode) { 2064 // Copy all operands except the intrinsic ID. 2065 unsigned NumOps = Op.getNumOperands(); 2066 SmallVector<SDValue, 6> Ops; 2067 Ops.reserve(NumOps - 1); 2068 for (unsigned I = 1; I < NumOps; ++I) 2069 Ops.push_back(Op.getOperand(I)); 2070 2071 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops); 2072 return Intr.getNode(); 2073 } 2074 2075 // CC is a comparison that will be implemented using an integer or 2076 // floating-point comparison. Return the condition code mask for 2077 // a branch on true. In the integer case, CCMASK_CMP_UO is set for 2078 // unsigned comparisons and clear for signed ones. In the floating-point 2079 // case, CCMASK_CMP_UO has its normal mask meaning (unordered). 2080 static unsigned CCMaskForCondCode(ISD::CondCode CC) { 2081 #define CONV(X) \ 2082 case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \ 2083 case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \ 2084 case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X 2085 2086 switch (CC) { 2087 default: 2088 llvm_unreachable("Invalid integer condition!"); 2089 2090 CONV(EQ); 2091 CONV(NE); 2092 CONV(GT); 2093 CONV(GE); 2094 CONV(LT); 2095 CONV(LE); 2096 2097 case ISD::SETO: return SystemZ::CCMASK_CMP_O; 2098 case ISD::SETUO: return SystemZ::CCMASK_CMP_UO; 2099 } 2100 #undef CONV 2101 } 2102 2103 // If C can be converted to a comparison against zero, adjust the operands 2104 // as necessary. 
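// For example, the signed test "x > -1" is equivalent to "x >= 0":
// toggling CCMASK_CMP_EQ turns GT into GE, and Op1 becomes constant 0.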
2105 static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
2106 if (C.ICmpType == SystemZICMP::UnsignedOnly)
2107 return;
2108
2109 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
2110 if (!ConstOp1)
2111 return;
2112
2113 int64_t Value = ConstOp1->getSExtValue();
2114 if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
2115 (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
2116 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
2117 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
2118 C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
2119 C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
2120 }
2121 }
2122
2123 // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
2124 // adjust the operands as necessary.
2125 static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
2126 Comparison &C) {
2127 // For us to make any changes, it must be a comparison between a single-use
2128 // load and a constant.
2129 if (!C.Op0.hasOneUse() ||
2130 C.Op0.getOpcode() != ISD::LOAD ||
2131 C.Op1.getOpcode() != ISD::Constant)
2132 return;
2133
2134 // We must have an 8- or 16-bit load.
2135 auto *Load = cast<LoadSDNode>(C.Op0);
2136 unsigned NumBits = Load->getMemoryVT().getSizeInBits();
2137 if ((NumBits != 8 && NumBits != 16) ||
2138 NumBits != Load->getMemoryVT().getStoreSizeInBits())
2139 return;
2140
2141 // The load must be an extending one and the constant must be within the
2142 // range of the unextended value.
2143 auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
2144 uint64_t Value = ConstOp1->getZExtValue();
2145 uint64_t Mask = (1 << NumBits) - 1;
2146 if (Load->getExtensionType() == ISD::SEXTLOAD) {
2147 // Make sure that ConstOp1 is in range of C.Op0.
2148 int64_t SignedValue = ConstOp1->getSExtValue();
2149 if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
2150 return;
2151 if (C.ICmpType != SystemZICMP::SignedOnly) {
2152 // Unsigned comparison between two sign-extended values is equivalent
2153 // to unsigned comparison between two zero-extended values.
2154 Value &= Mask;
2155 } else if (NumBits == 8) {
2156 // Try to treat the comparison as unsigned, so that we can use CLI.
2157 // Adjust CCMask and Value as necessary.
2158 if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
2159 // Test whether the high bit of the byte is set.
2160 Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
2161 else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
2162 // Test whether the high bit of the byte is clear.
2163 Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
2164 else
2165 // No instruction exists for this combination.
2166 return;
2167 C.ICmpType = SystemZICMP::UnsignedOnly;
2168 }
2169 } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
2170 if (Value > Mask)
2171 return;
2172 // If the constant is in range, we can use any comparison.
2173 C.ICmpType = SystemZICMP::Any;
2174 } else
2175 return;
2176
2177 // Make sure that the first operand is an i32 of the right extension type.
2178 ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
2179 ISD::SEXTLOAD :
2180 ISD::ZEXTLOAD);
2181 if (C.Op0.getValueType() != MVT::i32 ||
2182 Load->getExtensionType() != ExtType) {
2183 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
2184 Load->getBasePtr(), Load->getPointerInfo(),
2185 Load->getMemoryVT(), Load->getAlignment(),
2186 Load->getMemOperand()->getFlags());
2187 // Update the chain uses.
2188 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1)); 2189 } 2190 2191 // Make sure that the second operand is an i32 with the right value. 2192 if (C.Op1.getValueType() != MVT::i32 || 2193 Value != ConstOp1->getZExtValue()) 2194 C.Op1 = DAG.getConstant(Value, DL, MVT::i32); 2195 } 2196 2197 // Return true if Op is either an unextended load, or a load suitable 2198 // for integer register-memory comparisons of type ICmpType. 2199 static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) { 2200 auto *Load = dyn_cast<LoadSDNode>(Op.getNode()); 2201 if (Load) { 2202 // There are no instructions to compare a register with a memory byte. 2203 if (Load->getMemoryVT() == MVT::i8) 2204 return false; 2205 // Otherwise decide on extension type. 2206 switch (Load->getExtensionType()) { 2207 case ISD::NON_EXTLOAD: 2208 return true; 2209 case ISD::SEXTLOAD: 2210 return ICmpType != SystemZICMP::UnsignedOnly; 2211 case ISD::ZEXTLOAD: 2212 return ICmpType != SystemZICMP::SignedOnly; 2213 default: 2214 break; 2215 } 2216 } 2217 return false; 2218 } 2219 2220 // Return true if it is better to swap the operands of C. 2221 static bool shouldSwapCmpOperands(const Comparison &C) { 2222 // Leave f128 comparisons alone, since they have no memory forms. 2223 if (C.Op0.getValueType() == MVT::f128) 2224 return false; 2225 2226 // Always keep a floating-point constant second, since comparisons with 2227 // zero can use LOAD TEST and comparisons with other constants make a 2228 // natural memory operand. 2229 if (isa<ConstantFPSDNode>(C.Op1)) 2230 return false; 2231 2232 // Never swap comparisons with zero since there are many ways to optimize 2233 // those later. 2234 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); 2235 if (ConstOp1 && ConstOp1->getZExtValue() == 0) 2236 return false; 2237 2238 // Also keep natural memory operands second if the loaded value is 2239 // only used here. Several comparisons have memory forms. 2240 if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse()) 2241 return false; 2242 2243 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't. 2244 // In that case we generally prefer the memory to be second. 2245 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) { 2246 // The only exceptions are when the second operand is a constant and 2247 // we can use things like CHHSI. 2248 if (!ConstOp1) 2249 return true; 2250 // The unsigned memory-immediate instructions can handle 16-bit 2251 // unsigned integers. 2252 if (C.ICmpType != SystemZICMP::SignedOnly && 2253 isUInt<16>(ConstOp1->getZExtValue())) 2254 return false; 2255 // The signed memory-immediate instructions can handle 16-bit 2256 // signed integers. 2257 if (C.ICmpType != SystemZICMP::UnsignedOnly && 2258 isInt<16>(ConstOp1->getSExtValue())) 2259 return false; 2260 return true; 2261 } 2262 2263 // Try to promote the use of CGFR and CLGFR. 2264 unsigned Opcode0 = C.Op0.getOpcode(); 2265 if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND) 2266 return true; 2267 if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND) 2268 return true; 2269 if (C.ICmpType != SystemZICMP::SignedOnly && 2270 Opcode0 == ISD::AND && 2271 C.Op0.getOperand(1).getOpcode() == ISD::Constant && 2272 cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff) 2273 return true; 2274 2275 return false; 2276 } 2277 2278 // Check whether C tests for equality between X and Y and whether X - Y 2279 // or Y - X is also computed. 
In that case it's better to compare the 2280 // result of the subtraction against zero. 2281 static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, 2282 Comparison &C) { 2283 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 2284 C.CCMask == SystemZ::CCMASK_CMP_NE) { 2285 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) { 2286 SDNode *N = *I; 2287 if (N->getOpcode() == ISD::SUB && 2288 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) || 2289 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) { 2290 C.Op0 = SDValue(N, 0); 2291 C.Op1 = DAG.getConstant(0, DL, N->getValueType(0)); 2292 return; 2293 } 2294 } 2295 } 2296 } 2297 2298 // Check whether C compares a floating-point value with zero and if that 2299 // floating-point value is also negated. In this case we can use the 2300 // negation to set CC, so avoiding separate LOAD AND TEST and 2301 // LOAD (NEGATIVE/COMPLEMENT) instructions. 2302 static void adjustForFNeg(Comparison &C) { 2303 // This optimization is invalid for strict comparisons, since FNEG 2304 // does not raise any exceptions. 2305 if (C.Chain) 2306 return; 2307 auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1); 2308 if (C1 && C1->isZero()) { 2309 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) { 2310 SDNode *N = *I; 2311 if (N->getOpcode() == ISD::FNEG) { 2312 C.Op0 = SDValue(N, 0); 2313 C.CCMask = SystemZ::reverseCCMask(C.CCMask); 2314 return; 2315 } 2316 } 2317 } 2318 } 2319 2320 // Check whether C compares (shl X, 32) with 0 and whether X is 2321 // also sign-extended. In that case it is better to test the result 2322 // of the sign extension using LTGFR. 2323 // 2324 // This case is important because InstCombine transforms a comparison 2325 // with (sext (trunc X)) into a comparison with (shl X, 32). 2326 static void adjustForLTGFR(Comparison &C) { 2327 // Check for a comparison between (shl X, 32) and 0. 2328 if (C.Op0.getOpcode() == ISD::SHL && 2329 C.Op0.getValueType() == MVT::i64 && 2330 C.Op1.getOpcode() == ISD::Constant && 2331 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 2332 auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1)); 2333 if (C1 && C1->getZExtValue() == 32) { 2334 SDValue ShlOp0 = C.Op0.getOperand(0); 2335 // See whether X has any SIGN_EXTEND_INREG uses. 2336 for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) { 2337 SDNode *N = *I; 2338 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG && 2339 cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) { 2340 C.Op0 = SDValue(N, 0); 2341 return; 2342 } 2343 } 2344 } 2345 } 2346 } 2347 2348 // If C compares the truncation of an extending load, try to compare 2349 // the untruncated value instead. This exposes more opportunities to 2350 // reuse CC. 
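// For example, if X was zero-extended by the load, then (trunc X) == 0
// holds exactly when X == 0, so X can be compared with zero directly.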
2351 static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, 2352 Comparison &C) { 2353 if (C.Op0.getOpcode() == ISD::TRUNCATE && 2354 C.Op0.getOperand(0).getOpcode() == ISD::LOAD && 2355 C.Op1.getOpcode() == ISD::Constant && 2356 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 2357 auto *L = cast<LoadSDNode>(C.Op0.getOperand(0)); 2358 if (L->getMemoryVT().getStoreSizeInBits().getFixedSize() <= 2359 C.Op0.getValueSizeInBits().getFixedSize()) { 2360 unsigned Type = L->getExtensionType(); 2361 if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) || 2362 (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) { 2363 C.Op0 = C.Op0.getOperand(0); 2364 C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType()); 2365 } 2366 } 2367 } 2368 } 2369 2370 // Return true if shift operation N has an in-range constant shift value. 2371 // Store it in ShiftVal if so. 2372 static bool isSimpleShift(SDValue N, unsigned &ShiftVal) { 2373 auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1)); 2374 if (!Shift) 2375 return false; 2376 2377 uint64_t Amount = Shift->getZExtValue(); 2378 if (Amount >= N.getValueSizeInBits()) 2379 return false; 2380 2381 ShiftVal = Amount; 2382 return true; 2383 } 2384 2385 // Check whether an AND with Mask is suitable for a TEST UNDER MASK 2386 // instruction and whether the CC value is descriptive enough to handle 2387 // a comparison of type Opcode between the AND result and CmpVal. 2388 // CCMask says which comparison result is being tested and BitSize is 2389 // the number of bits in the operands. If TEST UNDER MASK can be used, 2390 // return the corresponding CC mask, otherwise return 0. 2391 static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, 2392 uint64_t Mask, uint64_t CmpVal, 2393 unsigned ICmpType) { 2394 assert(Mask != 0 && "ANDs with zero should have been removed by now"); 2395 2396 // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL. 2397 if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) && 2398 !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask)) 2399 return 0; 2400 2401 // Work out the masks for the lowest and highest bits. 2402 unsigned HighShift = 63 - countLeadingZeros(Mask); 2403 uint64_t High = uint64_t(1) << HighShift; 2404 uint64_t Low = uint64_t(1) << countTrailingZeros(Mask); 2405 2406 // Signed ordered comparisons are effectively unsigned if the sign 2407 // bit is dropped. 2408 bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly); 2409 2410 // Check for equality comparisons with 0, or the equivalent. 2411 if (CmpVal == 0) { 2412 if (CCMask == SystemZ::CCMASK_CMP_EQ) 2413 return SystemZ::CCMASK_TM_ALL_0; 2414 if (CCMask == SystemZ::CCMASK_CMP_NE) 2415 return SystemZ::CCMASK_TM_SOME_1; 2416 } 2417 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) { 2418 if (CCMask == SystemZ::CCMASK_CMP_LT) 2419 return SystemZ::CCMASK_TM_ALL_0; 2420 if (CCMask == SystemZ::CCMASK_CMP_GE) 2421 return SystemZ::CCMASK_TM_SOME_1; 2422 } 2423 if (EffectivelyUnsigned && CmpVal < Low) { 2424 if (CCMask == SystemZ::CCMASK_CMP_LE) 2425 return SystemZ::CCMASK_TM_ALL_0; 2426 if (CCMask == SystemZ::CCMASK_CMP_GT) 2427 return SystemZ::CCMASK_TM_SOME_1; 2428 } 2429 2430 // Check for equality comparisons with the mask, or the equivalent. 
2431 if (CmpVal == Mask) {
2432 if (CCMask == SystemZ::CCMASK_CMP_EQ)
2433 return SystemZ::CCMASK_TM_ALL_1;
2434 if (CCMask == SystemZ::CCMASK_CMP_NE)
2435 return SystemZ::CCMASK_TM_SOME_0;
2436 }
2437 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
2438 if (CCMask == SystemZ::CCMASK_CMP_GT)
2439 return SystemZ::CCMASK_TM_ALL_1;
2440 if (CCMask == SystemZ::CCMASK_CMP_LE)
2441 return SystemZ::CCMASK_TM_SOME_0;
2442 }
2443 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
2444 if (CCMask == SystemZ::CCMASK_CMP_GE)
2445 return SystemZ::CCMASK_TM_ALL_1;
2446 if (CCMask == SystemZ::CCMASK_CMP_LT)
2447 return SystemZ::CCMASK_TM_SOME_0;
2448 }
2449
2450 // Check for ordered comparisons with the top bit.
2451 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
2452 if (CCMask == SystemZ::CCMASK_CMP_LE)
2453 return SystemZ::CCMASK_TM_MSB_0;
2454 if (CCMask == SystemZ::CCMASK_CMP_GT)
2455 return SystemZ::CCMASK_TM_MSB_1;
2456 }
2457 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
2458 if (CCMask == SystemZ::CCMASK_CMP_LT)
2459 return SystemZ::CCMASK_TM_MSB_0;
2460 if (CCMask == SystemZ::CCMASK_CMP_GE)
2461 return SystemZ::CCMASK_TM_MSB_1;
2462 }
2463
2464 // If there are just two bits, we can do equality checks for Low and High
2465 // as well.
2466 if (Mask == Low + High) {
2467 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
2468 return SystemZ::CCMASK_TM_MIXED_MSB_0;
2469 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
2470 return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
2471 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
2472 return SystemZ::CCMASK_TM_MIXED_MSB_1;
2473 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
2474 return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
2475 }
2476
2477 // Looks like we've exhausted our options.
2478 return 0;
2479 }
2480
2481 // See whether C can be implemented as a TEST UNDER MASK instruction.
2482 // Update the arguments with the TM version if so.
2483 static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
2484 Comparison &C) {
2485 // Check that we have a comparison with a constant.
2486 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
2487 if (!ConstOp1)
2488 return;
2489 uint64_t CmpVal = ConstOp1->getZExtValue();
2490
2491 // Check whether the nonconstant input is an AND with a constant mask.
2492 Comparison NewC(C);
2493 uint64_t MaskVal;
2494 ConstantSDNode *Mask = nullptr;
2495 if (C.Op0.getOpcode() == ISD::AND) {
2496 NewC.Op0 = C.Op0.getOperand(0);
2497 NewC.Op1 = C.Op0.getOperand(1);
2498 Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
2499 if (!Mask)
2500 return;
2501 MaskVal = Mask->getZExtValue();
2502 } else {
2503 // There is no instruction to compare with a 64-bit immediate
2504 // so use TMHH instead if possible. We need an unsigned ordered
2505 // comparison with an i64 immediate.
2506 if (NewC.Op0.getValueType() != MVT::i64 ||
2507 NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
2508 NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
2509 NewC.ICmpType == SystemZICMP::SignedOnly)
2510 return;
2511 // Convert LE and GT comparisons into LT and GE.
2512 if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
2513 NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
2514 if (CmpVal == uint64_t(-1))
2515 return;
2516 CmpVal += 1;
2517 NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
2518 }
2519 // If the low N bits of Op1 are zero then the low N bits of Op0 can
2520 // be masked off without changing the result.
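// -(CmpVal & -CmpVal) keeps every bit from CmpVal's lowest set bit upwards;
// e.g. CmpVal == 0x1000 yields MaskVal == 0xfffffffffffff000.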
2521 MaskVal = -(CmpVal & -CmpVal); 2522 NewC.ICmpType = SystemZICMP::UnsignedOnly; 2523 } 2524 if (!MaskVal) 2525 return; 2526 2527 // Check whether the combination of mask, comparison value and comparison 2528 // type are suitable. 2529 unsigned BitSize = NewC.Op0.getValueSizeInBits(); 2530 unsigned NewCCMask, ShiftVal; 2531 if (NewC.ICmpType != SystemZICMP::SignedOnly && 2532 NewC.Op0.getOpcode() == ISD::SHL && 2533 isSimpleShift(NewC.Op0, ShiftVal) && 2534 (MaskVal >> ShiftVal != 0) && 2535 ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal && 2536 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, 2537 MaskVal >> ShiftVal, 2538 CmpVal >> ShiftVal, 2539 SystemZICMP::Any))) { 2540 NewC.Op0 = NewC.Op0.getOperand(0); 2541 MaskVal >>= ShiftVal; 2542 } else if (NewC.ICmpType != SystemZICMP::SignedOnly && 2543 NewC.Op0.getOpcode() == ISD::SRL && 2544 isSimpleShift(NewC.Op0, ShiftVal) && 2545 (MaskVal << ShiftVal != 0) && 2546 ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal && 2547 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, 2548 MaskVal << ShiftVal, 2549 CmpVal << ShiftVal, 2550 SystemZICMP::UnsignedOnly))) { 2551 NewC.Op0 = NewC.Op0.getOperand(0); 2552 MaskVal <<= ShiftVal; 2553 } else { 2554 NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal, 2555 NewC.ICmpType); 2556 if (!NewCCMask) 2557 return; 2558 } 2559 2560 // Go ahead and make the change. 2561 C.Opcode = SystemZISD::TM; 2562 C.Op0 = NewC.Op0; 2563 if (Mask && Mask->getZExtValue() == MaskVal) 2564 C.Op1 = SDValue(Mask, 0); 2565 else 2566 C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType()); 2567 C.CCValid = SystemZ::CCMASK_TM; 2568 C.CCMask = NewCCMask; 2569 } 2570 2571 // See whether the comparison argument contains a redundant AND 2572 // and remove it if so. This sometimes happens due to the generic 2573 // BRCOND expansion. 2574 static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, 2575 Comparison &C) { 2576 if (C.Op0.getOpcode() != ISD::AND) 2577 return; 2578 auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1)); 2579 if (!Mask) 2580 return; 2581 KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0)); 2582 if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue()) 2583 return; 2584 2585 C.Op0 = C.Op0.getOperand(0); 2586 } 2587 2588 // Return a Comparison that tests the condition-code result of intrinsic 2589 // node Call against constant integer CC using comparison code Cond. 2590 // Opcode is the opcode of the SystemZISD operation for the intrinsic 2591 // and CCValid is the set of possible condition-code results. 2592 static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, 2593 SDValue Call, unsigned CCValid, uint64_t CC, 2594 ISD::CondCode Cond) { 2595 Comparison C(Call, SDValue(), SDValue()); 2596 C.Opcode = Opcode; 2597 C.CCValid = CCValid; 2598 if (Cond == ISD::SETEQ) 2599 // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3. 2600 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0; 2601 else if (Cond == ISD::SETNE) 2602 // ...and the inverse of that. 2603 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1; 2604 else if (Cond == ISD::SETLT || Cond == ISD::SETULT) 2605 // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3, 2606 // always true for CC>3. 2607 C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1; 2608 else if (Cond == ISD::SETGE || Cond == ISD::SETUGE) 2609 // ...and the inverse of that. 2610 C.CCMask = CC < 4 ? 
~(~0U << (4 - CC)) : 0;
2611 else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
2612 // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
2613 // always true for CC>3.
2614 C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2615 else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
2616 // ...and the inverse of that.
2617 C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
2618 else
2619 llvm_unreachable("Unexpected integer comparison type");
2620 C.CCMask &= CCValid;
2621 return C;
2622 }
2623
2624 // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
2625 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
2626 ISD::CondCode Cond, const SDLoc &DL,
2627 SDValue Chain = SDValue(),
2628 bool IsSignaling = false) {
2629 if (CmpOp1.getOpcode() == ISD::Constant) {
2630 assert(!Chain);
2631 uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
2632 unsigned Opcode, CCValid;
2633 if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
2634 CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
2635 isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
2636 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2637 if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2638 CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
2639 isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
2640 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2641 }
2642 Comparison C(CmpOp0, CmpOp1, Chain);
2643 C.CCMask = CCMaskForCondCode(Cond);
2644 if (C.Op0.getValueType().isFloatingPoint()) {
2645 C.CCValid = SystemZ::CCMASK_FCMP;
2646 if (!C.Chain)
2647 C.Opcode = SystemZISD::FCMP;
2648 else if (!IsSignaling)
2649 C.Opcode = SystemZISD::STRICT_FCMP;
2650 else
2651 C.Opcode = SystemZISD::STRICT_FCMPS;
2652 adjustForFNeg(C);
2653 } else {
2654 assert(!C.Chain);
2655 C.CCValid = SystemZ::CCMASK_ICMP;
2656 C.Opcode = SystemZISD::ICMP;
2657 // Choose the type of comparison. Equality and inequality tests can
2658 // use either signed or unsigned comparisons. The choice also doesn't
2659 // matter if both sign bits are known to be clear. In those cases we
2660 // want to give the main isel code the freedom to choose whichever
2661 // form fits best.
2662 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
2663 C.CCMask == SystemZ::CCMASK_CMP_NE ||
2664 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
2665 C.ICmpType = SystemZICMP::Any;
2666 else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
2667 C.ICmpType = SystemZICMP::UnsignedOnly;
2668 else
2669 C.ICmpType = SystemZICMP::SignedOnly;
2670 C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
2671 adjustForRedundantAnd(DAG, DL, C);
2672 adjustZeroCmp(DAG, DL, C);
2673 adjustSubwordCmp(DAG, DL, C);
2674 adjustForSubtraction(DAG, DL, C);
2675 adjustForLTGFR(C);
2676 adjustICmpTruncate(DAG, DL, C);
2677 }
2678
2679 if (shouldSwapCmpOperands(C)) {
2680 std::swap(C.Op0, C.Op1);
2681 C.CCMask = SystemZ::reverseCCMask(C.CCMask);
2682 }
2683
2684 adjustForTestUnderMask(DAG, DL, C);
2685 return C;
2686 }
2687
2688 // Emit the comparison instruction described by C.
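// Intrinsic comparisons have a null Op1 and are re-emitted as CC-producing
// nodes; otherwise this builds an ICMP, TM or (STRICT_)FCMP(S) node.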
2689 static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
2690 if (!C.Op1.getNode()) {
2691 SDNode *Node;
2692 switch (C.Op0.getOpcode()) {
2693 case ISD::INTRINSIC_W_CHAIN:
2694 Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode);
2695 return SDValue(Node, 0);
2696 case ISD::INTRINSIC_WO_CHAIN:
2697 Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode);
2698 return SDValue(Node, Node->getNumValues() - 1);
2699 default:
2700 llvm_unreachable("Invalid comparison operands");
2701 }
2702 }
2703 if (C.Opcode == SystemZISD::ICMP)
2704 return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1,
2705 DAG.getTargetConstant(C.ICmpType, DL, MVT::i32));
2706 if (C.Opcode == SystemZISD::TM) {
2707 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
2708 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
2709 return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
2710 DAG.getTargetConstant(RegisterOnly, DL, MVT::i32));
2711 }
2712 if (C.Chain) {
2713 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
2714 return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
2715 }
2716 return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
2717 }
2718
2719 // Implement a 32-bit *MUL_LOHI operation by extending both operands to
2720 // 64 bits. Extend is the extension type to use. Store the high part
2721 // in Hi and the low part in Lo.
2722 static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
2723 SDValue Op0, SDValue Op1, SDValue &Hi,
2724 SDValue &Lo) {
2725 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
2726 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
2727 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
2728 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
2729 DAG.getConstant(32, DL, MVT::i64));
2730 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
2731 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
2732 }
2733
2734 // Lower a binary operation that produces two VT results, one in each
2735 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
2736 // and Opcode performs the GR128 operation. Store the even register result
2737 // in Even and the odd register result in Odd.
2738 static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
2739 unsigned Opcode, SDValue Op0, SDValue Op1,
2740 SDValue &Even, SDValue &Odd) {
2741 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
2742 bool Is32Bit = is32Bit(VT);
2743 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
2744 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
2745 }
2746
2747 // Return an i32 value that is 1 if the CC value produced by CCReg is
2748 // in the mask CCMask and 0 otherwise. CC is known to have a value
2749 // in CCValid, so other values can be ignored.
2750 static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
2751 unsigned CCValid, unsigned CCMask) {
2752 SDValue Ops[] = {DAG.getConstant(1, DL, MVT::i32),
2753 DAG.getConstant(0, DL, MVT::i32),
2754 DAG.getTargetConstant(CCValid, DL, MVT::i32),
2755 DAG.getTargetConstant(CCMask, DL, MVT::i32), CCReg};
2756 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
2757 }
2758
2759 // Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
2760 // be done directly.
Mode is CmpMode::Int for integer comparisons, CmpMode::FP 2761 // for regular floating-point comparisons, CmpMode::StrictFP for strict (quiet) 2762 // floating-point comparisons, and CmpMode::SignalingFP for strict signaling 2763 // floating-point comparisons. 2764 enum class CmpMode { Int, FP, StrictFP, SignalingFP }; 2765 static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode) { 2766 switch (CC) { 2767 case ISD::SETOEQ: 2768 case ISD::SETEQ: 2769 switch (Mode) { 2770 case CmpMode::Int: return SystemZISD::VICMPE; 2771 case CmpMode::FP: return SystemZISD::VFCMPE; 2772 case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPE; 2773 case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPES; 2774 } 2775 llvm_unreachable("Bad mode"); 2776 2777 case ISD::SETOGE: 2778 case ISD::SETGE: 2779 switch (Mode) { 2780 case CmpMode::Int: return 0; 2781 case CmpMode::FP: return SystemZISD::VFCMPHE; 2782 case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPHE; 2783 case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPHES; 2784 } 2785 llvm_unreachable("Bad mode"); 2786 2787 case ISD::SETOGT: 2788 case ISD::SETGT: 2789 switch (Mode) { 2790 case CmpMode::Int: return SystemZISD::VICMPH; 2791 case CmpMode::FP: return SystemZISD::VFCMPH; 2792 case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPH; 2793 case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPHS; 2794 } 2795 llvm_unreachable("Bad mode"); 2796 2797 case ISD::SETUGT: 2798 switch (Mode) { 2799 case CmpMode::Int: return SystemZISD::VICMPHL; 2800 case CmpMode::FP: return 0; 2801 case CmpMode::StrictFP: return 0; 2802 case CmpMode::SignalingFP: return 0; 2803 } 2804 llvm_unreachable("Bad mode"); 2805 2806 default: 2807 return 0; 2808 } 2809 } 2810 2811 // Return the SystemZISD vector comparison operation for CC or its inverse, 2812 // or 0 if neither can be done directly. Indicate in Invert whether the 2813 // result is for the inverse of CC. Mode is as above. 2814 static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, 2815 bool &Invert) { 2816 if (unsigned Opcode = getVectorComparison(CC, Mode)) { 2817 Invert = false; 2818 return Opcode; 2819 } 2820 2821 CC = ISD::getSetCCInverse(CC, Mode == CmpMode::Int ? MVT::i32 : MVT::f32); 2822 if (unsigned Opcode = getVectorComparison(CC, Mode)) { 2823 Invert = true; 2824 return Opcode; 2825 } 2826 2827 return 0; 2828 } 2829 2830 // Return a v2f64 that contains the extended form of elements Start and Start+1 2831 // of v4f32 value Op. If Chain is nonnull, return the strict form. 2832 static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, 2833 SDValue Op, SDValue Chain) { 2834 int Mask[] = { Start, -1, Start + 1, -1 }; 2835 Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask); 2836 if (Chain) { 2837 SDVTList VTs = DAG.getVTList(MVT::v2f64, MVT::Other); 2838 return DAG.getNode(SystemZISD::STRICT_VEXTEND, DL, VTs, Chain, Op); 2839 } 2840 return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op); 2841 } 2842 2843 // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode, 2844 // producing a result of type VT. If Chain is nonnull, return the strict form. 2845 SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode, 2846 const SDLoc &DL, EVT VT, 2847 SDValue CmpOp0, 2848 SDValue CmpOp1, 2849 SDValue Chain) const { 2850 // There is no hardware support for v4f32 (unless we have the vector 2851 // enhancements facility 1), so extend the vector into two v2f64s 2852 // and compare those. 
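// The two v2i64 half-results are then PACKed back together to form the
// final v4i32 mask.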
2853 if (CmpOp0.getValueType() == MVT::v4f32 && 2854 !Subtarget.hasVectorEnhancements1()) { 2855 SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0, Chain); 2856 SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0, Chain); 2857 SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1, Chain); 2858 SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1, Chain); 2859 if (Chain) { 2860 SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::Other); 2861 SDValue HRes = DAG.getNode(Opcode, DL, VTs, Chain, H0, H1); 2862 SDValue LRes = DAG.getNode(Opcode, DL, VTs, Chain, L0, L1); 2863 SDValue Res = DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes); 2864 SDValue Chains[6] = { H0.getValue(1), L0.getValue(1), 2865 H1.getValue(1), L1.getValue(1), 2866 HRes.getValue(1), LRes.getValue(1) }; 2867 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 2868 SDValue Ops[2] = { Res, NewChain }; 2869 return DAG.getMergeValues(Ops, DL); 2870 } 2871 SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1); 2872 SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1); 2873 return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes); 2874 } 2875 if (Chain) { 2876 SDVTList VTs = DAG.getVTList(VT, MVT::Other); 2877 return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1); 2878 } 2879 return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1); 2880 } 2881 2882 // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing 2883 // an integer mask of type VT. If Chain is nonnull, we have a strict 2884 // floating-point comparison. If in addition IsSignaling is true, we have 2885 // a strict signaling floating-point comparison. 2886 SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG, 2887 const SDLoc &DL, EVT VT, 2888 ISD::CondCode CC, 2889 SDValue CmpOp0, 2890 SDValue CmpOp1, 2891 SDValue Chain, 2892 bool IsSignaling) const { 2893 bool IsFP = CmpOp0.getValueType().isFloatingPoint(); 2894 assert (!Chain || IsFP); 2895 assert (!IsSignaling || Chain); 2896 CmpMode Mode = IsSignaling ? CmpMode::SignalingFP : 2897 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int; 2898 bool Invert = false; 2899 SDValue Cmp; 2900 switch (CC) { 2901 // Handle tests for order using (or (ogt y x) (oge x y)). 2902 case ISD::SETUO: 2903 Invert = true; 2904 LLVM_FALLTHROUGH; 2905 case ISD::SETO: { 2906 assert(IsFP && "Unexpected integer comparison"); 2907 SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode), 2908 DL, VT, CmpOp1, CmpOp0, Chain); 2909 SDValue GE = getVectorCmp(DAG, getVectorComparison(ISD::SETOGE, Mode), 2910 DL, VT, CmpOp0, CmpOp1, Chain); 2911 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE); 2912 if (Chain) 2913 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 2914 LT.getValue(1), GE.getValue(1)); 2915 break; 2916 } 2917 2918 // Handle <> tests using (or (ogt y x) (ogt x y)). 2919 case ISD::SETUEQ: 2920 Invert = true; 2921 LLVM_FALLTHROUGH; 2922 case ISD::SETONE: { 2923 assert(IsFP && "Unexpected integer comparison"); 2924 SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode), 2925 DL, VT, CmpOp1, CmpOp0, Chain); 2926 SDValue GT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode), 2927 DL, VT, CmpOp0, CmpOp1, Chain); 2928 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT); 2929 if (Chain) 2930 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 2931 LT.getValue(1), GT.getValue(1)); 2932 break; 2933 } 2934 2935 // Otherwise a single comparison is enough. 
It doesn't really 2936 // matter whether we try the inversion or the swap first, since 2937 // there are no cases where both work. 2938 default: 2939 if (unsigned Opcode = getVectorComparisonOrInvert(CC, Mode, Invert)) 2940 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain); 2941 else { 2942 CC = ISD::getSetCCSwappedOperands(CC); 2943 if (unsigned Opcode = getVectorComparisonOrInvert(CC, Mode, Invert)) 2944 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain); 2945 else 2946 llvm_unreachable("Unhandled comparison"); 2947 } 2948 if (Chain) 2949 Chain = Cmp.getValue(1); 2950 break; 2951 } 2952 if (Invert) { 2953 SDValue Mask = 2954 DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64)); 2955 Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask); 2956 } 2957 if (Chain && Chain.getNode() != Cmp.getNode()) { 2958 SDValue Ops[2] = { Cmp, Chain }; 2959 Cmp = DAG.getMergeValues(Ops, DL); 2960 } 2961 return Cmp; 2962 } 2963 2964 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, 2965 SelectionDAG &DAG) const { 2966 SDValue CmpOp0 = Op.getOperand(0); 2967 SDValue CmpOp1 = Op.getOperand(1); 2968 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2969 SDLoc DL(Op); 2970 EVT VT = Op.getValueType(); 2971 if (VT.isVector()) 2972 return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1); 2973 2974 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 2975 SDValue CCReg = emitCmp(DAG, DL, C); 2976 return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask); 2977 } 2978 2979 SDValue SystemZTargetLowering::lowerSTRICT_FSETCC(SDValue Op, 2980 SelectionDAG &DAG, 2981 bool IsSignaling) const { 2982 SDValue Chain = Op.getOperand(0); 2983 SDValue CmpOp0 = Op.getOperand(1); 2984 SDValue CmpOp1 = Op.getOperand(2); 2985 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get(); 2986 SDLoc DL(Op); 2987 EVT VT = Op.getNode()->getValueType(0); 2988 if (VT.isVector()) { 2989 SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1, 2990 Chain, IsSignaling); 2991 return Res.getValue(Op.getResNo()); 2992 } 2993 2994 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL, Chain, IsSignaling)); 2995 SDValue CCReg = emitCmp(DAG, DL, C); 2996 CCReg->setFlags(Op->getFlags()); 2997 SDValue Result = emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask); 2998 SDValue Ops[2] = { Result, CCReg.getValue(1) }; 2999 return DAG.getMergeValues(Ops, DL); 3000 } 3001 3002 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 3003 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3004 SDValue CmpOp0 = Op.getOperand(2); 3005 SDValue CmpOp1 = Op.getOperand(3); 3006 SDValue Dest = Op.getOperand(4); 3007 SDLoc DL(Op); 3008 3009 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 3010 SDValue CCReg = emitCmp(DAG, DL, C); 3011 return DAG.getNode( 3012 SystemZISD::BR_CCMASK, DL, Op.getValueType(), Op.getOperand(0), 3013 DAG.getTargetConstant(C.CCValid, DL, MVT::i32), 3014 DAG.getTargetConstant(C.CCMask, DL, MVT::i32), Dest, CCReg); 3015 } 3016 3017 // Return true if Pos is CmpOp and Neg is the negative of CmpOp, 3018 // allowing Pos and Neg to be wider than CmpOp. 
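// That is, Neg must be (sub 0, Pos) and Pos must be either CmpOp itself
// or (sext CmpOp), the patterns behind LPGFR and LNGFR.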
// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
// allowing Pos and Neg to be wider than CmpOp.
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
  return (Neg.getOpcode() == ISD::SUB &&
          Neg.getOperand(0).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
          Neg.getOperand(1) == Pos &&
          (Pos == CmpOp ||
           (Pos.getOpcode() == ISD::SIGN_EXTEND &&
            Pos.getOperand(0) == CmpOp)));
}

// Return the absolute or negative absolute of Op; IsNegative decides which.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
                           bool IsNegative) {
  Op = DAG.getNode(ISD::ABS, DL, Op.getValueType(), Op);
  if (IsNegative)
    Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
                     DAG.getConstant(0, DL, Op.getValueType()), Op);
  return Op;
}

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  SDValue TrueOp = Op.getOperand(2);
  SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

  // Check for absolute and negative-absolute selections, including those
  // where the comparison value is sign-extended (for LPGFR and LNGFR).
  // This check supplements the one in DAGCombiner.
  if (C.Opcode == SystemZISD::ICMP &&
      C.CCMask != SystemZ::CCMASK_CMP_EQ &&
      C.CCMask != SystemZ::CCMASK_CMP_NE &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    if (isAbsolute(C.Op0, TrueOp, FalseOp))
      return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
    if (isAbsolute(C.Op0, FalseOp, TrueOp))
      return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
  }

  SDValue CCReg = emitCmp(DAG, DL, C);
  SDValue Ops[] = {TrueOp, FalseOp,
                   DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
                   DAG.getTargetConstant(C.CCMask, DL, MVT::i32), CCReg};

  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops);
}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  CodeModel::Model CM = DAG.getTarget().getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, CM)) {
    if (isInt<32>(Offset)) {
      // Assign anchors at 1<<12 byte boundaries.
      uint64_t Anchor = Offset & ~uint64_t(0xfff);
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
      Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);

      // The offset can be folded into the address if it is aligned to a
      // halfword.
      Offset -= Anchor;
      if (Offset != 0 && (Offset & 1) == 0) {
        SDValue Full =
            DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
        Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
        Offset = 0;
      }
    } else {
      // Conservatively load a constant offset greater than 32 bits into a
      // register below.
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT);
      Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    }
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, DL, PtrVT));

  return Result;
}

SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
                                                 SelectionDAG &DAG,
                                                 unsigned Opcode,
                                                 SDValue GOTOffset) const {
  SDLoc DL(Node);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Chain = DAG.getEntryNode();
  SDValue Glue;

  if (DAG.getMachineFunction().getFunction().getCallingConv() ==
      CallingConv::GHC)
    report_fatal_error("In GHC calling convention TLS is not supported");

  // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
  Glue = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
  Glue = Chain.getValue(1);

  // The first call operand is the chain and the second is the TLS symbol.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
                                           Node->getValueType(0),
                                           0, 0));

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
  Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies.
  Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Copy the return value from %r2.
  return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
}

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, DL, PtrVT));
  return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(Node, DAG);
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  TLSModel::Model model = DAG.getTarget().getTLSModel(GV);

  if (DAG.getMachineFunction().getFunction().getCallingConv() ==
      CallingConv::GHC)
    report_fatal_error("In GHC calling convention TLS is not supported");

  SDValue TP = lowerThreadPointer(DL, DAG);

  // Get the offset of GA from the thread pointer, based on the TLS model.
  SDValue Offset;
  switch (model) {
  case TLSModel::GeneralDynamic: {
    // Load the GOT offset of the tls_index (module ID / per-symbol offset).
    SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);

    Offset = DAG.getConstantPool(CPV, PtrVT, Align(8));
    Offset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    // Call __tls_get_offset to retrieve the offset.
    Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
    break;
  }

  case TLSModel::LocalDynamic: {
    // Load the GOT offset of the module ID.
    SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);

    Offset = DAG.getConstantPool(CPV, PtrVT, Align(8));
    Offset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    // Call __tls_get_offset to retrieve the module base offset.
    Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);

    // Note: The SystemZLDCleanupPass will remove redundant computations
    // of the module base offset. Count total number of local-dynamic
    // accesses to trigger execution of that pass.
    SystemZMachineFunctionInfo* MFI =
        DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
    MFI->incNumLocalDynamicTLSAccesses();

    // Add the per-symbol offset.
    CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);

    SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, Align(8));
    DTPOffset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), DTPOffset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
    break;
  }

  case TLSModel::InitialExec: {
    // Load the offset from the GOT.
    Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                        SystemZII::MO_INDNTPOFF);
    Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
    Offset =
        DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
                    MachinePointerInfo::getGOT(DAG.getMachineFunction()));
    break;
  }
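  // For reference, the four models trade flexibility for speed roughly as
  // follows (a summary, not an exhaustive ABI description):
  //   GeneralDynamic - GOT tls_index entry plus a __tls_get_offset call
  //                    per symbol.
  //   LocalDynamic   - one __tls_get_offset call for the module base,
  //                    plus a constant DTPOFF per symbol.
  //   InitialExec    - the offset is loaded from a GOT entry that the
  //                    loader fills in (no call).
  //   LocalExec      - the offset is a link-time constant kept in the
  //                    constant pool (no call, no GOT).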
  case TLSModel::LocalExec: {
    // Force the offset into the constant pool and load it from there.
    SystemZConstantPoolValue *CPV =
        SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

    Offset = DAG.getConstantPool(CPV, PtrVT, Align(8));
    Offset = DAG.getLoad(
        PtrVT, DL, DAG.getEntryNode(), Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    break;
  }
  }

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result =
        DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
                                       CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // By definition, the frame address is the address of the back chain. (In
  // the case of a packed stack without backchain, return the address where
  // the backchain would have been stored; this will either be unused space
  // or contain a saved register.)
  int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
  SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);

  // FIXME: The frontend should detect this case.
  if (Depth > 0) {
    report_fatal_error("Unsupported stack frame traversal count");
  }

  return BackChain;
}

SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
                                               SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // FIXME: The frontend should detect this case.
  if (Depth > 0) {
    report_fatal_error("Unsupported stack frame traversal count");
  }

  // Return R14D, which has the return address. Mark it an implicit live-in.
  unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
}

SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  // Convert loads directly. This is normally done by DAGCombiner,
  // but we need this case for bitcasts that are created during lowering
  // and which are then lowered themselves.
  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
    if (ISD::isNormalLoad(LoadN)) {
      SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(),
                                    LoadN->getBasePtr(), LoadN->getMemOperand());
      // Update the chain uses.
      DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1));
      return NewLoad;
    }

  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64;
    if (Subtarget.hasHighWord()) {
      SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
                                       MVT::i64);
      In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                       MVT::i64, SDValue(U64, 0), In);
    } else {
      In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
      In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
                         DAG.getConstant(32, DL, MVT::i64));
    }
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
    return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
                                      DL, MVT::f32, Out64);
  }
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                             MVT::f64, SDValue(U64, 0), In);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
    if (Subtarget.hasHighWord())
      return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
                                        MVT::i32, Out64);
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
                                DAG.getConstant(32, DL, MVT::i64));
    return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
  }
  llvm_unreachable("Unexpected bitcast combination");
}

SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };
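  // These four values mirror the s390x ELF ABI's va_list layout (shown here
  // as a reminder; the ABI document is the authoritative definition):
  //
  //   typedef struct {
  //     long __gpr;                 // offset 0: next GPR argument number
  //     long __fpr;                 // offset 8: next FPR argument number
  //     void *__overflow_arg_area;  // offset 16: stack argument area
  //     void *__reg_save_area;      // offset 24: saved register area
  //   } va_list[1];
  //
  // which is why the loop below stores the fields at offsets 0, 8, 16
  // and 24.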
  // Store each field into its respective slot.
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset));
    Offset += 8;
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue DstPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
                       Align(8), /*isVolatile*/ false, /*AlwaysInline*/ false,
                       /*isTailCall*/ false, MachinePointerInfo(DstSV),
                       MachinePointerInfo(SrcSV));
}

SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  MachineFunction &MF = DAG.getMachineFunction();
  bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc DL(Op);

  // If the function has the "no-realign-stack" attribute, ignore alloca
  // alignments.
  uint64_t AlignVal =
      (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);

  uint64_t StackAlign = TFI->getStackAlignment();
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

  Register SPReg = getStackPointerRegisterToSaveRestore();
  SDValue NeededSpace = Size;

  // Get a reference to the stack pointer.
  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);

  // If we need a backchain, save it now.
  SDValue Backchain;
  if (StoreBackchain)
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
                            MachinePointerInfo());

  // Add extra space for alignment if needed.
  if (ExtraAlignSpace)
    NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
                              DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));

  // Get the new stack pointer value.
  SDValue NewSP;
  if (hasInlineStackProbe(MF)) {
    NewSP = DAG.getNode(SystemZISD::PROBED_ALLOCA, DL,
                        DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP,
                        NeededSpace);
    Chain = NewSP.getValue(1);
  } else {
    NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
    // Copy the new stack pointer back.
    Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
  }

  // The allocated data lives above the 160 bytes allocated for the standard
  // frame, plus any outgoing stack arguments. We don't know how much that
  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
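  // A quick numeric sketch of the realignment below (illustrative values):
  // with an 8-byte StackAlign and a 16-byte alloca alignment, we allocated
  // ExtraAlignSpace = 8 extra bytes above. Adding those 8 bytes to Result
  // and masking with ~15 rounds the address up to the next 16-byte
  // boundary, which is guaranteed to still lie inside the allocation.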
  // Dynamically realign if needed.
  if (RequiredAlign > StackAlign) {
    Result =
        DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
                    DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
    Result =
        DAG.getNode(ISD::AND, DL, MVT::i64, Result,
                    DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
  }

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
                         MachinePointerInfo());

  SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);

  return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
}

SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
    // SystemZISD::SMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  else {
    // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
    //
    //   (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
    //
    // but using the fact that the upper halves are either all zeros
    // or all ones:
    //
    //   (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
    //
    // and grouping the right terms together since they are quicker than the
    // multiplication:
    //
    //   (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
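    //
    // A worked example (values picked for illustration): for l = -1 and
    // r = 2, the sign "halves" are lh = l >> 63 = ~0 and rh = r >> 63 = 0,
    // so ll = 2^64 - 1 and rl = 2. UMUL_LOHI gives a high word of 1, and
    // the correction (lh & rl) + (ll & rh) = 2 + 0 = 2, so the signed high
    // word becomes 1 - 2 = -1, as expected for the product -2.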
    SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
    SDValue LL = Op.getOperand(0);
    SDValue RL = Op.getOperand(1);
    SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
    SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     LL, RL, Ops[1], Ops[0]);
    SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
    SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
    SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
    Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
  }
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::UMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // We use DSGF for 32-bit division. This means the first operand must
  // always be 64-bit, and the second operand should be 32-bit whenever
  // that is possible, to improve performance.
  if (is32Bit(VT))
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
  else if (DAG.ComputeNumSignBits(Op1) > 32)
    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);

  // DSG(F) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // DL(G) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  // Get the known-zero masks for each operand.
  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
  KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]),
                        DAG.computeKnownBits(Ops[1])};

  // See if the upper 32 bits of one operand and the lower 32 bits of the
  // other are known zero. If so, they can serve as the low and high
  // operands respectively.
  uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
                       Known[1].Zero.getZExtValue() };
  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;

  SDValue LowOp = Ops[Low];
  SDValue HighOp = Ops[High];

  // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;

  // If the low part is a constant that is outside the range of LHI,
  // then we're better off using IILF.
  if (LowOp.getOpcode() == ISD::Constant) {
    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }
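  // An illustrative case of what this transformation buys (not taken from
  // the test suite): in
  //   (or (shl (zext i32 %a to i64), 32), (zext i32 %b to i64))
  // the first operand has its low 32 bits clear and the second its high
  // 32 bits clear, so the OR reduces to inserting %b into the low word of
  // the first operand's register - no actual OR instruction is needed.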
  // Check whether the high part is an AND that doesn't change the
  // high 32 bits and just masks out low bits. We can skip it if so.
  if (HighOp.getOpcode() == ISD::AND &&
      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue HighOp0 = HighOp.getOperand(0);
    uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
    if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
      HighOp = HighOp0;
  }

  // Take advantage of the fact that all GR32 operations only change the
  // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg. The interesting cases are those where the truncation
  // can be folded.
  SDLoc DL(Op);
  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
  return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
                                   MVT::i64, HighOp, Low32);
}

// Lower SADDO/SSUBO/UADDO/USUBO nodes.
SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::SADDO:
    BaseOp = SystemZISD::SADDO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::SSUBO:
    BaseOp = SystemZISD::SSUBO;
    CCValid = SystemZ::CCMASK_ARITH;
    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
    break;
  case ISD::UADDO:
    BaseOp = SystemZISD::UADDO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::USUBO:
    BaseOp = SystemZISD::USUBO;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}

static bool isAddCarryChain(SDValue Carry) {
  while (Carry.getOpcode() == ISD::ADDCARRY)
    Carry = Carry.getOperand(2);
  return Carry.getOpcode() == ISD::UADDO;
}

static bool isSubBorrowChain(SDValue Carry) {
  while (Carry.getOpcode() == ISD::SUBCARRY)
    Carry = Carry.getOperand(2);
  return Carry.getOpcode() == ISD::USUBO;
}
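// The chain walks above matter because a multi-word addition such as a
// 128-bit add is legalized into something like (illustrative shape only):
//   (lo, c) = uaddo a.lo, b.lo
//   (hi, _) = addcarry a.hi, b.hi, c
// Only when every carry in the chain originates from UADDO/ADDCARRY can
// the whole sequence be mapped onto ALGR followed by ALCGR, which read
// the carry directly from CC.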
// Lower ADDCARRY/SUBCARRY nodes.
SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
                                                SelectionDAG &DAG) const {

  SDNode *N = Op.getNode();
  MVT VT = N->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDLoc DL(N);
  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown instruction!");
  case ISD::ADDCARRY:
    if (!isAddCarryChain(Carry))
      return SDValue();

    BaseOp = SystemZISD::ADDCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
    break;
  case ISD::SUBCARRY:
    if (!isSubBorrowChain(Carry))
      return SDValue();

    BaseOp = SystemZISD::SUBCARRY;
    CCValid = SystemZ::CCMASK_LOGICAL;
    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
    break;
  }

  // Set the condition code from the carry flag.
  Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
                      DAG.getConstant(CCValid, DL, MVT::i32),
                      DAG.getConstant(CCMask, DL, MVT::i32));

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);

  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
  if (N->getValueType(1) == MVT::i1)
    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
}

SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
                                          SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  Op = Op.getOperand(0);

  // Handle vector types via VPOPCT.
  if (VT.isVector()) {
    Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
    Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
    switch (VT.getScalarSizeInBits()) {
    case 8:
      break;
    case 16: {
      Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
      SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
      SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
      Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
      break;
    }
    case 32: {
      SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
                                            DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    case 64: {
      SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
                                            DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    default:
      llvm_unreachable("Unexpected type");
    }
    return Op;
  }

  // Get the known-zero mask for the operand.
  KnownBits Known = DAG.computeKnownBits(Op);
  unsigned NumSignificantBits = Known.getMaxValue().getActiveBits();
  if (NumSignificantBits == 0)
    return DAG.getConstant(0, DL, VT);

  // Skip known-zero high parts of the operand.
  int64_t OrigBitSize = VT.getSizeInBits();
  int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
  BitSize = std::min(BitSize, OrigBitSize);

  // The POPCNT instruction counts the number of bits in each byte.
  Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
  Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
  Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
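  // To see how the tree below works, take an i32 operand 0x01010101
  // (chosen as an example). POPCNT leaves the per-byte counts 01 01 01 01.
  // The first shift-and-add pass (I = 16) accumulates byte sums across the
  // 16-bit halves, the second (I = 8) across the whole word, so the most
  // significant byte ends up holding 4, the total population count, which
  // the final SRL then extracts.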
  // Add up per-byte counts in a binary tree. All bits of Op at
  // position larger than BitSize remain zero throughout.
  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
    if (BitSize != OrigBitSize)
      Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
                        DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
    Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
  }

  // Extract overall result from high byte.
  if (BitSize > 8)
    Op = DAG.getNode(ISD::SRL, DL, VT, Op,
                     DAG.getConstant(BitSize - 8, DL, VT));

  return Op;
}

SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
      cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
      cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
      FenceSSID == SyncScope::System) {
    return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
                                      Op.getOperand(0)),
                   0);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

// Op is an atomic load. Lower it into a normal volatile load.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
                                                SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
                        Node->getChain(), Node->getBasePtr(),
                        Node->getMemoryVT(), Node->getMemOperand());
}

// Op is an atomic store. Lower it into a normal volatile store.
SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
                                    Node->getBasePtr(), Node->getMemoryVT(),
                                    Node->getMemOperand());
  // We have to enforce sequential consistency by performing a
  // serialization operation after the store.
  if (Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent)
    Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
                                       MVT::Other, Chain), 0);
  return Chain;
}

// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  // 32-bit operations need no code outside the main loop.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getChain();
  SDValue Addr = Node->getBasePtr();
  SDValue Src2 = Node->getVal();
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();
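  // The machinery below can be pictured with a halfword at address
  // A = 0x1002 (an arbitrary example): AlignedAddr = 0x1000, and the
  // rotate amount derived from (A << 3) has 16 in its low bits, so
  // rotating the containing word left by 16 brings the halfword into the
  // top 16 bits of the GR32, where the ATOMIC_LOADW_* loop operates on
  // it; NegBitShift rotates it back afterwards.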
  // Convert atomic subtracts of constants into additions.
  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
    if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
      Opcode = SystemZISD::ATOMIC_LOADW_ADD;
      Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
    }

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Extend the source operand to 32 bits and prepare it for the inner loop.
  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
  // operations require the source to be shifted in advance. (This shift
  // can be folded if the source is constant.) For AND and NAND, the lower
  // bits must be set, while for other opcodes they should be left clear.
  if (Opcode != SystemZISD::ATOMIC_SWAPW)
    Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
                       DAG.getConstant(32 - BitSize, DL, WideVT));
  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
      Opcode == SystemZISD::ATOMIC_LOADW_NAND)
    Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
                       DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));

  // Construct the ATOMIC_LOADW_* node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
                    DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
                                             NarrowVT, MMO);

  // Rotate the result of the final CS so that the field is in the lower
  // bits of a GR32, then truncate it.
  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
                                    DAG.getConstant(BitSize, DL, WideVT));
  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);

  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
  return DAG.getMergeValues(RetOps, DL);
}

// Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
// operations into additions.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
    // A full-width operation.
    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    SDValue Src2 = Node->getVal();
    SDValue NegSrc2;
    SDLoc DL(Src2);

    if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
      // Use an addition if the operand is constant and either LAA(G) is
      // available or the negative value is in the range of A(G)FHI.
      int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
      if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
        NegSrc2 = DAG.getConstant(Value, DL, MemVT);
    } else if (Subtarget.hasInterlockedAccess1())
      // Use LAA(G) if available.
      NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
                            Src2);

    if (NegSrc2.getNode())
      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
                           Node->getChain(), Node->getBasePtr(), NegSrc2,
                           Node->getMemOperand());

    // Use the node as-is.
    return Op;
  }

  return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
}

// Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue ChainIn = Node->getOperand(0);
  SDValue Addr = Node->getOperand(1);
  SDValue CmpVal = Node->getOperand(2);
  SDValue SwapVal = Node->getOperand(3);
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);

  // We have native support for 32-bit and 64-bit compare and swap, but we
  // still need to expand extracting the "success" result from the CC.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {
    SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
    SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP,
                                               DL, Tys, Ops, NarrowVT, MMO);
    SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);

    DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
    return SDValue();
  }

  // Convert 8-bit and 16-bit compare and swap to a loop, implemented
  // via a fullword ATOMIC_CMP_SWAPW operation.
  int64_t BitSize = NarrowVT.getSizeInBits();
  EVT PtrVT = Addr.getValueType();

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Construct the ATOMIC_CMP_SWAPW node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                    NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
                                             VTList, Ops, NarrowVT, MMO);
  SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
                              SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ);

  // emitAtomicCmpSwapW() will zero extend the result (original value).
  SDValue OrigVal = DAG.getNode(ISD::AssertZext, DL, WideVT, AtomicOp.getValue(0),
                                DAG.getValueType(NarrowVT));
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), OrigVal);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
  return SDValue();
}

MachineMemOperand::Flags
SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const {
  // Because of how we convert atomic_load and atomic_store to normal loads and
  // stores in the DAG, we need to ensure that the MMOs are marked volatile,
  // since DAGCombine hasn't been updated to account for atomic but
  // non-volatile loads. (See D57601.)
  if (auto *SI = dyn_cast<StoreInst>(&I))
    if (SI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *LI = dyn_cast<LoadInst>(&I))
    if (LI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  return MachineMemOperand::MONone;
}

SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    report_fatal_error("Variable-sized stack allocations are not supported "
                       "in GHC calling convention");
  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
                            SystemZ::R15D, Op.getValueType());
}

SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    report_fatal_error("Variable-sized stack allocations are not supported "
                       "in GHC calling convention");

  SDValue Chain = Op.getOperand(0);
  SDValue NewSP = Op.getOperand(1);
  SDValue Backchain;
  SDLoc DL(Op);

  if (StoreBackchain) {
    SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
                            MachinePointerInfo());
  }

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
                         MachinePointerInfo());

  return Chain;
}
SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (!IsData)
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc DL(Op);
  bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
  auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
  SDValue Ops[] = {Op.getOperand(0), DAG.getTargetConstant(Code, DL, MVT::i32),
                   Op.getOperand(1)};
  return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
                                 Node->getVTList(), Ops,
                                 Node->getMemoryVT(), Node->getMemOperand());
}

// Convert condition code in CCReg to an i32 value.
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
  SDLoc DL(CCReg);
  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
  return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
                     DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
}

SDValue
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                              SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
    SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode);
    SDValue CC = getCCResult(DAG, SDValue(Node, 0));
    DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
    return SDValue();
  }

  return SDValue();
}

SDValue
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                               SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
    SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode);
    if (Op->getNumValues() == 1)
      return getCCResult(DAG, SDValue(Node, 0));
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
    return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(),
                       SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1)));
  }

  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);

  case Intrinsic::s390_vpdi:
    return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vperm:
    return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
    return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
    return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
    return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }

  return SDValue();
}

namespace {
// Describes a SystemZISD operation Opcode that can be used to perform the
// equivalent of a VPERM with permute vector Bytes. If Opcode takes three
// operands, Operand is the constant third operand, otherwise it is the
// number of bytes in each element of the result.
struct Permute {
  unsigned Opcode;
  unsigned Operand;
  unsigned char Bytes[SystemZ::VectorBytes];
};
} // end anonymous namespace

static const Permute PermuteForms[] = {
  // VMRHG
  { SystemZISD::MERGE_HIGH, 8,
    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VMRHF
  { SystemZISD::MERGE_HIGH, 4,
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
  // VMRHH
  { SystemZISD::MERGE_HIGH, 2,
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
  // VMRHB
  { SystemZISD::MERGE_HIGH, 1,
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
  // VMRLG
  { SystemZISD::MERGE_LOW, 8,
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
  // VMRLF
  { SystemZISD::MERGE_LOW, 4,
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
  // VMRLH
  { SystemZISD::MERGE_LOW, 2,
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
  // VMRLB
  { SystemZISD::MERGE_LOW, 1,
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
  // VPKG
  { SystemZISD::PACK, 4,
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
  // VPKF
  { SystemZISD::PACK, 2,
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
  // VPKH
  { SystemZISD::PACK, 1,
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
  // VPDI V1, V2, 4  (low half of V1, high half of V2)
  { SystemZISD::PERMUTE_DWORDS, 4,
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VPDI V1, V2, 1  (high half of V1, low half of V2)
  { SystemZISD::PERMUTE_DWORDS, 1,
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
};

// Called after matching a vector shuffle against a particular pattern.
// Both the original shuffle and the pattern have two vector operands.
// OpNos[0] is the operand of the original shuffle that should be used for
// operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
// OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
// set OpNo0 and OpNo1 to the shuffle operands that should actually be used
// for operands 0 and 1 of the pattern.
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
  if (OpNos[0] < 0) {
    if (OpNos[1] < 0)
      return false;
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];
  } else {
    OpNo0 = OpNos[0];
    OpNo1 = OpNos[1];
  }
  return true;
}
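// To read the tables above: a permute vector indexes the 32-byte
// concatenation of the two operands, so selector values 0-15 pick bytes
// of operand 0 and 16-31 pick bytes of operand 1. For example, VMRHB's
// pattern { 0, 16, 1, 17, ... } interleaves the first eight bytes of the
// two operands, which is exactly a byte-level merge-high.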
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. Return true if the VPERM can be implemented using P.
// When returning true set OpNo0 to the VPERM operand that should be
// used for operand 0 of P and likewise OpNo1 for operand 1 of P.
//
// For example, if swapping the VPERM operands allows P to match, OpNo0
// will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
// operand, but rewriting it to use two duplicated operands allows it to
// match P, then OpNo0 and OpNo1 will be the same.
static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
                         unsigned &OpNo0, unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
    int Elt = Bytes[I];
    if (Elt >= 0) {
      // Make sure that the two permute vectors use the same suboperand
      // byte number. Only the operand numbers (the high bits) are
      // allowed to differ.
      if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
        return false;
      int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
      int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
      // Make sure that the operand mappings are consistent with previous
      // elements.
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
        return false;
      OpNos[ModelOpNo] = RealOpNo;
    }
  }
  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}

// As above, but search for a matching permute.
static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
                                   unsigned &OpNo0, unsigned &OpNo1) {
  for (auto &P : PermuteForms)
    if (matchPermute(Bytes, P, OpNo0, OpNo1))
      return &P;
  return nullptr;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. This permute is an operand of an outer permute.
// See whether redistributing the -1 bytes gives a shuffle that can be
// implemented using P. If so, set Transform to a VPERM-like permute vector
// that, when applied to the result of P, gives the original permute in Bytes.
static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                               const Permute &P,
                               SmallVectorImpl<int> &Transform) {
  unsigned To = 0;
  for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
    int Elt = Bytes[From];
    if (Elt < 0)
      // Byte number From of the result is undefined.
      Transform[From] = -1;
    else {
      while (P.Bytes[To] != Elt) {
        To += 1;
        if (To == SystemZ::VectorBytes)
          return false;
      }
      Transform[From] = To;
    }
  }
  return true;
}

// As above, but search for a matching permute.
static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                                         SmallVectorImpl<int> &Transform) {
  for (auto &P : PermuteForms)
    if (matchDoublePermute(Bytes, P, Transform))
      return &P;
  return nullptr;
}
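// As an illustration of the byte-level masks built below: a v4i32 shuffle
// mask <1, 0, 3, 2> expands to the byte mask
//   4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11
// since each i32 element covers four consecutive bytes.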
// Convert the mask of the given shuffle op into a byte-level mask,
// as if it had type vNi8.
static bool getVPermMask(SDValue ShuffleOp,
                         SmallVectorImpl<int> &Bytes) {
  EVT VT = ShuffleOp.getValueType();
  unsigned NumElements = VT.getVectorNumElements();
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I) {
      int Index = VSN->getMaskElt(I);
      if (Index >= 0)
        for (unsigned J = 0; J < BytesPerElement; ++J)
          Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
    }
    return true;
  }
  if (SystemZISD::SPLAT == ShuffleOp.getOpcode() &&
      isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
    unsigned Index = ShuffleOp.getConstantOperandVal(1);
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I)
      for (unsigned J = 0; J < BytesPerElement; ++J)
        Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
    return true;
  }
  return false;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
// the result come from a contiguous sequence of bytes from one input.
// Set Base to the selector for the first byte if so.
static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
                            unsigned BytesPerElement, int &Base) {
  Base = -1;
  for (unsigned I = 0; I < BytesPerElement; ++I) {
    if (Bytes[Start + I] >= 0) {
      unsigned Elem = Bytes[Start + I];
      if (Base < 0) {
        Base = Elem - I;
        // Make sure the bytes would come from one input operand.
        if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
          return false;
      } else if (unsigned(Base) != Elem - I)
        return false;
    }
  }
  return true;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. Return true if it can be performed using VSLDB.
// When returning true, set StartIndex to the shift amount and OpNo0
// and OpNo1 to the VPERM operands that should be used as the first
// and second shift operand respectively.
static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
                               unsigned &StartIndex, unsigned &OpNo0,
                               unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
  int Shift = -1;
  for (unsigned I = 0; I < 16; ++I) {
    int Index = Bytes[I];
    if (Index >= 0) {
      int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
      int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
      int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
      if (Shift < 0)
        Shift = ExpectedShift;
      else if (Shift != ExpectedShift)
        return false;
      // Make sure that the operand mappings are consistent with previous
      // elements.
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
        return false;
      OpNos[ModelOpNo] = RealOpNo;
    }
  }
  StartIndex = Shift;
  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}

// Create a node that performs P on operands Op0 and Op1, casting the
// operands to the appropriate type. The type of the result is determined by P.
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                              const Permute &P, SDValue Op0, SDValue Op1) {
  // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
  // elements of a PACK are twice as wide as the outputs.
  unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
                      P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
                      P.Operand);
  // Cast both operands to the appropriate type.
  MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
                              SystemZ::VectorBytes / InBytes);
  Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
  Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
  SDValue Op;
  if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
    SDValue Op2 = DAG.getTargetConstant(P.Operand, DL, MVT::i32);
    Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
  } else if (P.Opcode == SystemZISD::PACK) {
    MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
                                 SystemZ::VectorBytes / P.Operand);
    Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
  } else {
    Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
  }
  return Op;
}

static bool isZeroVector(SDValue N) {
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0);
  if (N->getOpcode() == ISD::SPLAT_VECTOR)
    if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0)))
      return Op->getZExtValue() == 0;
  return ISD::isBuildVectorAllZeros(N.getNode());
}

// Return the index of the zero/undef vector, or UINT32_MAX if not found.
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num) {
  for (unsigned I = 0; I < Num ; I++)
    if (isZeroVector(Ops[I]))
      return I;
  return UINT32_MAX;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. Implement it on operands Ops[0] and Ops[1] using
// VSLDB or VPERM.
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                                     SDValue *Ops,
                                     const SmallVectorImpl<int> &Bytes) {
  for (unsigned I = 0; I < 2; ++I)
    Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);

  // First see whether VSLDB can be used.
  unsigned StartIndex, OpNo0, OpNo1;
  if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
    return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
                       Ops[OpNo1],
                       DAG.getTargetConstant(StartIndex, DL, MVT::i32));

  // Fall back on VPERM. Construct an SDNode for the permute vector. Try to
  // eliminate a zero vector by reusing any zero index in the permute vector.
  unsigned ZeroVecIdx = findZeroVectorIdx(&Ops[0], 2);
  if (ZeroVecIdx != UINT32_MAX) {
    bool MaskFirst = true;
    int ZeroIdx = -1;
    for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
      unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes;
      unsigned Byte = unsigned(Bytes[I]) % SystemZ::VectorBytes;
      if (OpNo == ZeroVecIdx && I == 0) {
        // If the first byte is zero, use mask as first operand.
        ZeroIdx = 0;
        break;
      }
      if (OpNo != ZeroVecIdx && Byte == 0) {
        // If mask contains a zero, use it by placing that vector first.
        ZeroIdx = I + SystemZ::VectorBytes;
        MaskFirst = false;
        break;
      }
    }
    if (ZeroIdx != -1) {
      SDValue IndexNodes[SystemZ::VectorBytes];
      for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
        if (Bytes[I] >= 0) {
          unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes;
          unsigned Byte = unsigned(Bytes[I]) % SystemZ::VectorBytes;
          if (OpNo == ZeroVecIdx)
            IndexNodes[I] = DAG.getConstant(ZeroIdx, DL, MVT::i32);
          else {
            unsigned BIdx = MaskFirst ? Byte + SystemZ::VectorBytes : Byte;
            IndexNodes[I] = DAG.getConstant(BIdx, DL, MVT::i32);
          }
        } else
          IndexNodes[I] = DAG.getUNDEF(MVT::i32);
      }
      SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
      SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];
      if (MaskFirst)
        return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Mask, Src,
                           Mask);
      else
        return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Src, Mask,
                           Mask);
    }
  }

  SDValue IndexNodes[SystemZ::VectorBytes];
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
    if (Bytes[I] >= 0)
      IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
    else
      IndexNodes[I] = DAG.getUNDEF(MVT::i32);
  SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
  return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0],
                     (!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2);
}

namespace {
// Describes a general N-operand vector shuffle.
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}
  void addUndef();
  bool add(SDValue, unsigned);
  SDValue getNode(SelectionDAG &, const SDLoc &);
  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }
  SDValue insertUnpackIfPrepared(SelectionDAG &DAG, const SDLoc &DL, SDValue Op);

  // The operands of the shuffle.
  SmallVector<SDValue, SystemZ::VectorBytes> Ops;

  // Index I is -1 if byte I of the result is undefined. Otherwise the
  // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
  // Bytes[I] / SystemZ::VectorBytes.
  SmallVector<int, SystemZ::VectorBytes> Bytes;

  // The type of the shuffle result.
  EVT VT;

  // Holds a value of 1, 2 or 4 if a final unpack has been prepared for.
  unsigned UnpackFromEltSize;
};
} // end anonymous namespace

// Add an extra undefined element to the shuffle.
void GeneralShuffle::addUndef() {
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
}

// Add an extra element to the shuffle, taking it from element Elem of Op.
// A null Op indicates a vector input whose value will be calculated later;
// there is at most one such input per shuffle and it always has the same
// type as the result. Aborts and returns false if the source vector elements
// of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per
// LLVM they become implicitly extended, but this is rare and not optimized.
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();

  // The source vector can have wider elements than the result,
  // either through an explicit TRUNCATE or because of type legalization.
  // We want the least significant part.
  EVT FromVT = Op.getNode() ?
The input 4523 // elements of a PACK are twice as wide as the outputs. 4524 unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 : 4525 P.Opcode == SystemZISD::PACK ? P.Operand * 2 : 4526 P.Operand); 4527 // Cast both operands to the appropriate type. 4528 MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8), 4529 SystemZ::VectorBytes / InBytes); 4530 Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0); 4531 Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1); 4532 SDValue Op; 4533 if (P.Opcode == SystemZISD::PERMUTE_DWORDS) { 4534 SDValue Op2 = DAG.getTargetConstant(P.Operand, DL, MVT::i32); 4535 Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2); 4536 } else if (P.Opcode == SystemZISD::PACK) { 4537 MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8), 4538 SystemZ::VectorBytes / P.Operand); 4539 Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1); 4540 } else { 4541 Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1); 4542 } 4543 return Op; 4544 } 4545 4546 static bool isZeroVector(SDValue N) { 4547 if (N->getOpcode() == ISD::BITCAST) 4548 N = N->getOperand(0); 4549 if (N->getOpcode() == ISD::SPLAT_VECTOR) 4550 if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0))) 4551 return Op->getZExtValue() == 0; 4552 return ISD::isBuildVectorAllZeros(N.getNode()); 4553 } 4554 4555 // Return the index of the zero/undef vector, or UINT32_MAX if not found. 4556 static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num) { 4557 for (unsigned I = 0; I < Num ; I++) 4558 if (isZeroVector(Ops[I])) 4559 return I; 4560 return UINT32_MAX; 4561 } 4562 4563 // Bytes is a VPERM-like permute vector, except that -1 is used for 4564 // undefined bytes. Implement it on operands Ops[0] and Ops[1] using 4565 // VSLDB or VPERM. 4566 static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, 4567 SDValue *Ops, 4568 const SmallVectorImpl<int> &Bytes) { 4569 for (unsigned I = 0; I < 2; ++I) 4570 Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]); 4571 4572 // First see whether VSLDB can be used. 4573 unsigned StartIndex, OpNo0, OpNo1; 4574 if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1)) 4575 return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0], 4576 Ops[OpNo1], 4577 DAG.getTargetConstant(StartIndex, DL, MVT::i32)); 4578 4579 // Fall back on VPERM. Construct an SDNode for the permute vector. Try to 4580 // eliminate a zero vector by reusing any zero index in the permute vector. 4581 unsigned ZeroVecIdx = findZeroVectorIdx(&Ops[0], 2); 4582 if (ZeroVecIdx != UINT32_MAX) { 4583 bool MaskFirst = true; 4584 int ZeroIdx = -1; 4585 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) { 4586 unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes; 4587 unsigned Byte = unsigned(Bytes[I]) % SystemZ::VectorBytes; 4588 if (OpNo == ZeroVecIdx && I == 0) { 4589 // If the first byte is zero, use mask as first operand. 4590 ZeroIdx = 0; 4591 break; 4592 } 4593 if (OpNo != ZeroVecIdx && Byte == 0) { 4594 // If mask contains a zero, use it by placing that vector first. 
4595 ZeroIdx = I + SystemZ::VectorBytes; 4596 MaskFirst = false; 4597 break; 4598 } 4599 } 4600 if (ZeroIdx != -1) { 4601 SDValue IndexNodes[SystemZ::VectorBytes]; 4602 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) { 4603 if (Bytes[I] >= 0) { 4604 unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes; 4605 unsigned Byte = unsigned(Bytes[I]) % SystemZ::VectorBytes; 4606 if (OpNo == ZeroVecIdx) 4607 IndexNodes[I] = DAG.getConstant(ZeroIdx, DL, MVT::i32); 4608 else { 4609 unsigned BIdx = MaskFirst ? Byte + SystemZ::VectorBytes : Byte; 4610 IndexNodes[I] = DAG.getConstant(BIdx, DL, MVT::i32); 4611 } 4612 } else 4613 IndexNodes[I] = DAG.getUNDEF(MVT::i32); 4614 } 4615 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes); 4616 SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0]; 4617 if (MaskFirst) 4618 return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Mask, Src, 4619 Mask); 4620 else 4621 return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Src, Mask, 4622 Mask); 4623 } 4624 } 4625 4626 SDValue IndexNodes[SystemZ::VectorBytes]; 4627 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) 4628 if (Bytes[I] >= 0) 4629 IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32); 4630 else 4631 IndexNodes[I] = DAG.getUNDEF(MVT::i32); 4632 SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes); 4633 return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], 4634 (!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2); 4635 } 4636 4637 namespace { 4638 // Describes a general N-operand vector shuffle. 4639 struct GeneralShuffle { 4640 GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {} 4641 void addUndef(); 4642 bool add(SDValue, unsigned); 4643 SDValue getNode(SelectionDAG &, const SDLoc &); 4644 void tryPrepareForUnpack(); 4645 bool unpackWasPrepared() { return UnpackFromEltSize <= 4; } 4646 SDValue insertUnpackIfPrepared(SelectionDAG &DAG, const SDLoc &DL, SDValue Op); 4647 4648 // The operands of the shuffle. 4649 SmallVector<SDValue, SystemZ::VectorBytes> Ops; 4650 4651 // Index I is -1 if byte I of the result is undefined. Otherwise the 4652 // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand 4653 // Bytes[I] / SystemZ::VectorBytes. 4654 SmallVector<int, SystemZ::VectorBytes> Bytes; 4655 4656 // The type of the shuffle result. 4657 EVT VT; 4658 4659 // Holds a value of 1, 2 or 4 if a final unpack has been prepared for. 4660 unsigned UnpackFromEltSize; 4661 }; 4662 } 4663 4664 // Add an extra undefined element to the shuffle. 4665 void GeneralShuffle::addUndef() { 4666 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); 4667 for (unsigned I = 0; I < BytesPerElement; ++I) 4668 Bytes.push_back(-1); 4669 } 4670 4671 // Add an extra element to the shuffle, taking it from element Elem of Op. 4672 // A null Op indicates a vector input whose value will be calculated later; 4673 // there is at most one such input per shuffle and it always has the same 4674 // type as the result. Aborts and returns false if the source vector elements 4675 // of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per 4676 // LLVM they become implicitly extended, but this is rare and not optimized. 4677 bool GeneralShuffle::add(SDValue Op, unsigned Elem) { 4678 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); 4679 4680 // The source vector can have wider elements than the result, 4681 // either through an explicit TRUNCATE or because of type legalization. 4682 // We want the least significant part. 4683 EVT FromVT = Op.getNode() ? 
Op.getValueType() : VT; 4684 unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize(); 4685 4686 // Return false if the source elements are smaller than their destination 4687 // elements. 4688 if (FromBytesPerElement < BytesPerElement) 4689 return false; 4690 4691 unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes + 4692 (FromBytesPerElement - BytesPerElement)); 4693 4694 // Look through things like shuffles and bitcasts. 4695 while (Op.getNode()) { 4696 if (Op.getOpcode() == ISD::BITCAST) 4697 Op = Op.getOperand(0); 4698 else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) { 4699 // See whether the bytes we need come from a contiguous part of one 4700 // operand. 4701 SmallVector<int, SystemZ::VectorBytes> OpBytes; 4702 if (!getVPermMask(Op, OpBytes)) 4703 break; 4704 int NewByte; 4705 if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte)) 4706 break; 4707 if (NewByte < 0) { 4708 addUndef(); 4709 return true; 4710 } 4711 Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes); 4712 Byte = unsigned(NewByte) % SystemZ::VectorBytes; 4713 } else if (Op.isUndef()) { 4714 addUndef(); 4715 return true; 4716 } else 4717 break; 4718 } 4719 4720 // Make sure that the source of the extraction is in Ops. 4721 unsigned OpNo = 0; 4722 for (; OpNo < Ops.size(); ++OpNo) 4723 if (Ops[OpNo] == Op) 4724 break; 4725 if (OpNo == Ops.size()) 4726 Ops.push_back(Op); 4727 4728 // Add the element to Bytes. 4729 unsigned Base = OpNo * SystemZ::VectorBytes + Byte; 4730 for (unsigned I = 0; I < BytesPerElement; ++I) 4731 Bytes.push_back(Base + I); 4732 4733 return true; 4734 } 4735 4736 // Return SDNodes for the completed shuffle. 4737 SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) { 4738 assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector"); 4739 4740 if (Ops.size() == 0) 4741 return DAG.getUNDEF(VT); 4742 4743 // Use a single unpack if possible as the last operation. 4744 tryPrepareForUnpack(); 4745 4746 // Make sure that there are at least two shuffle operands. 4747 if (Ops.size() == 1) 4748 Ops.push_back(DAG.getUNDEF(MVT::v16i8)); 4749 4750 // Create a tree of shuffles, deferring the root node until after the loop. 4751 // Try to redistribute the undefined elements of non-root nodes so that 4752 // the non-root shuffles match something like a pack or merge, then adjust 4753 // the parent node's permute vector to compensate for the new order. 4754 // Among other things, this copes with vectors like <2 x i16> that were 4755 // padded with undefined elements during type legalization. 4756 // 4757 // In the best case this redistribution will lead to the whole tree 4758 // using packs and merges. It should rarely be a loss in other cases. 4759 unsigned Stride = 1; 4760 for (; Stride * 2 < Ops.size(); Stride *= 2) { 4761 for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) { 4762 SDValue SubOps[] = { Ops[I], Ops[I + Stride] }; 4763 4764 // Create a mask for just these two operands. 4765 SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes); 4766 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) { 4767 unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes; 4768 unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes; 4769 if (OpNo == I) 4770 NewBytes[J] = Byte; 4771 else if (OpNo == I + Stride) 4772 NewBytes[J] = SystemZ::VectorBytes + Byte; 4773 else 4774 NewBytes[J] = -1; 4775 } 4776 // See if it would be better to reorganize NewBytes to avoid using VPERM.
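// For illustration (assumed values): NewBytes
// { 0,-1, 1,-1, 2,-1, 3,-1, 4,-1, 5,-1, 6,-1, 7,-1 } matches the byte form
// of MERGE_HIGH on bytes, { 0,16, 1,17, ..., 7,23 }, with Transform
// { 0,-1, 2,-1, 4,-1, ..., 14,-1 } recovering NewBytes from the merge result.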
4777 SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes); 4778 if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) { 4779 Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]); 4780 // Applying NewBytesMap to Ops[I] gets back to NewBytes. 4781 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) { 4782 if (NewBytes[J] >= 0) { 4783 assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes && 4784 "Invalid double permute"); 4785 Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J]; 4786 } else 4787 assert(NewBytesMap[J] < 0 && "Invalid double permute"); 4788 } 4789 } else { 4790 // Just use NewBytes on the operands. 4791 Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes); 4792 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) 4793 if (NewBytes[J] >= 0) 4794 Bytes[J] = I * SystemZ::VectorBytes + J; 4795 } 4796 } 4797 } 4798 4799 // Now we just have 2 inputs. Put the second operand in Ops[1]. 4800 if (Stride > 1) { 4801 Ops[1] = Ops[Stride]; 4802 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) 4803 if (Bytes[I] >= int(SystemZ::VectorBytes)) 4804 Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes; 4805 } 4806 4807 // Look for an instruction that can do the permute without resorting 4808 // to VPERM. 4809 unsigned OpNo0, OpNo1; 4810 SDValue Op; 4811 if (unpackWasPrepared() && Ops[1].isUndef()) 4812 Op = Ops[0]; 4813 else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1)) 4814 Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]); 4815 else 4816 Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes); 4817 4818 Op = insertUnpackIfPrepared(DAG, DL, Op); 4819 4820 return DAG.getNode(ISD::BITCAST, DL, VT, Op); 4821 } 4822 4823 #ifndef NDEBUG 4824 static void dumpBytes(const SmallVectorImpl<int> &Bytes, std::string Msg) { 4825 dbgs() << Msg.c_str() << " { "; 4826 for (unsigned i = 0; i < Bytes.size(); i++) 4827 dbgs() << Bytes[i] << " "; 4828 dbgs() << "}\n"; 4829 } 4830 #endif 4831 4832 // If the Bytes vector matches an unpack operation, prepare to do the unpack 4833 // after all else by removing the zero vector and the effect of the unpack on 4834 // Bytes. 4835 void GeneralShuffle::tryPrepareForUnpack() { 4836 uint32_t ZeroVecOpNo = findZeroVectorIdx(&Ops[0], Ops.size()); 4837 if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1) 4838 return; 4839 4840 // Only do this if removing the zero vector reduces the depth, otherwise 4841 // the critical path will increase with the final unpack. 4842 if (Ops.size() > 2 && 4843 Log2_32_Ceil(Ops.size()) == Log2_32_Ceil(Ops.size() - 1)) 4844 return; 4845 4846 // Find an unpack that would allow removing the zero vector from Ops. 4847 UnpackFromEltSize = 1; 4848 for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) { 4849 bool MatchUnpack = true; 4850 SmallVector<int, SystemZ::VectorBytes> SrcBytes; 4851 for (unsigned Elt = 0; Elt < SystemZ::VectorBytes; Elt++) { 4852 unsigned ToEltSize = UnpackFromEltSize * 2; 4853 bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize; 4854 if (!IsZextByte) 4855 SrcBytes.push_back(Bytes[Elt]); 4856 if (Bytes[Elt] != -1) { 4857 unsigned OpNo = unsigned(Bytes[Elt]) / SystemZ::VectorBytes; 4858 if (IsZextByte != (OpNo == ZeroVecOpNo)) { 4859 MatchUnpack = false; 4860 break; 4861 } 4862 } 4863 } 4864 if (MatchUnpack) { 4865 if (Ops.size() == 2) { 4866 // Don't use unpack if a single source operand needs rearrangement. 
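// e.g. SrcBytes { 16,17,18,19, ... } can feed the unpack directly, while
// { 17,16, ... } would first need its own shuffle, defeating the purpose.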
4867 for (unsigned i = 0; i < SystemZ::VectorBytes / 2; i++) 4868 if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) { 4869 UnpackFromEltSize = UINT_MAX; 4870 return; 4871 } 4872 } 4873 break; 4874 } 4875 } 4876 if (UnpackFromEltSize > 4) 4877 return; 4878 4879 LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size " 4880 << UnpackFromEltSize << ". Zero vector is Op#" << ZeroVecOpNo 4881 << ".\n"; 4882 dumpBytes(Bytes, "Original Bytes vector:");); 4883 4884 // Apply the unpack in reverse to the Bytes array. 4885 unsigned B = 0; 4886 for (unsigned Elt = 0; Elt < SystemZ::VectorBytes;) { 4887 Elt += UnpackFromEltSize; 4888 for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++) 4889 Bytes[B] = Bytes[Elt]; 4890 } 4891 while (B < SystemZ::VectorBytes) 4892 Bytes[B++] = -1; 4893 4894 // Remove the zero vector from Ops 4895 Ops.erase(&Ops[ZeroVecOpNo]); 4896 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) 4897 if (Bytes[I] >= 0) { 4898 unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes; 4899 if (OpNo > ZeroVecOpNo) 4900 Bytes[I] -= SystemZ::VectorBytes; 4901 } 4902 4903 LLVM_DEBUG(dumpBytes(Bytes, "Resulting Bytes vector, zero vector removed:"); 4904 dbgs() << "\n";); 4905 } 4906 4907 SDValue GeneralShuffle::insertUnpackIfPrepared(SelectionDAG &DAG, 4908 const SDLoc &DL, 4909 SDValue Op) { 4910 if (!unpackWasPrepared()) 4911 return Op; 4912 unsigned InBits = UnpackFromEltSize * 8; 4913 EVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBits), 4914 SystemZ::VectorBits / InBits); 4915 SDValue PackedOp = DAG.getNode(ISD::BITCAST, DL, InVT, Op); 4916 unsigned OutBits = InBits * 2; 4917 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(OutBits), 4918 SystemZ::VectorBits / OutBits); 4919 return DAG.getNode(SystemZISD::UNPACKL_HIGH, DL, OutVT, PackedOp); 4920 } 4921 4922 // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion. 4923 static bool isScalarToVector(SDValue Op) { 4924 for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I) 4925 if (!Op.getOperand(I).isUndef()) 4926 return false; 4927 return true; 4928 } 4929 4930 // Return a vector of type VT that contains Value in the first element. 4931 // The other elements don't matter. 4932 static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 4933 SDValue Value) { 4934 // If we have a constant, replicate it to all elements and let the 4935 // BUILD_VECTOR lowering take care of it. 4936 if (Value.getOpcode() == ISD::Constant || 4937 Value.getOpcode() == ISD::ConstantFP) { 4938 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value); 4939 return DAG.getBuildVector(VT, DL, Ops); 4940 } 4941 if (Value.isUndef()) 4942 return DAG.getUNDEF(VT); 4943 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); 4944 } 4945 4946 // Return a vector of type VT in which Op0 is in element 0 and Op1 is in 4947 // element 1. Used for cases in which replication is cheap. 4948 static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 4949 SDValue Op0, SDValue Op1) { 4950 if (Op0.isUndef()) { 4951 if (Op1.isUndef()) 4952 return DAG.getUNDEF(VT); 4953 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1); 4954 } 4955 if (Op1.isUndef()) 4956 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0); 4957 return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT, 4958 buildScalarToVector(DAG, DL, VT, Op0), 4959 buildScalarToVector(DAG, DL, VT, Op1)); 4960 } 4961 4962 // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64 4963 // vector for them. 
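// For illustration (assumed operands): joinDwords(i32 %a, undef) any-extends
// %a to i64 and emits (JOIN_DWORDS %a64, %a64), reusing the defined value
// instead of tying up a second register for the undef.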
4964 static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, 4965 SDValue Op1) { 4966 if (Op0.isUndef() && Op1.isUndef()) 4967 return DAG.getUNDEF(MVT::v2i64); 4968 // If one of the two inputs is undefined then replicate the other one, 4969 // in order to avoid using another register unnecessarily. 4970 if (Op0.isUndef()) 4971 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); 4972 else if (Op1.isUndef()) 4973 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); 4974 else { 4975 Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); 4976 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); 4977 } 4978 return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1); 4979 } 4980 4981 // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually 4982 // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for 4983 // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR 4984 // would benefit from this representation and return it if so. 4985 static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, 4986 BuildVectorSDNode *BVN) { 4987 EVT VT = BVN->getValueType(0); 4988 unsigned NumElements = VT.getVectorNumElements(); 4989 4990 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation 4991 // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still 4992 // need a BUILD_VECTOR, add an additional placeholder operand for that 4993 // BUILD_VECTOR and store its operands in ResidueOps. 4994 GeneralShuffle GS(VT); 4995 SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps; 4996 bool FoundOne = false; 4997 for (unsigned I = 0; I < NumElements; ++I) { 4998 SDValue Op = BVN->getOperand(I); 4999 if (Op.getOpcode() == ISD::TRUNCATE) 5000 Op = Op.getOperand(0); 5001 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5002 Op.getOperand(1).getOpcode() == ISD::Constant) { 5003 unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 5004 if (!GS.add(Op.getOperand(0), Elem)) 5005 return SDValue(); 5006 FoundOne = true; 5007 } else if (Op.isUndef()) { 5008 GS.addUndef(); 5009 } else { 5010 if (!GS.add(SDValue(), ResidueOps.size())) 5011 return SDValue(); 5012 ResidueOps.push_back(BVN->getOperand(I)); 5013 } 5014 } 5015 5016 // Nothing to do if there are no EXTRACT_VECTOR_ELTs. 5017 if (!FoundOne) 5018 return SDValue(); 5019 5020 // Create the BUILD_VECTOR for the remaining elements, if any. 5021 if (!ResidueOps.empty()) { 5022 while (ResidueOps.size() < NumElements) 5023 ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType())); 5024 for (auto &Op : GS.Ops) { 5025 if (!Op.getNode()) { 5026 Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps); 5027 break; 5028 } 5029 } 5030 } 5031 return GS.getNode(DAG, SDLoc(BVN)); 5032 } 5033 5034 bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const { 5035 if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed()) 5036 return true; 5037 if (Subtarget.hasVectorEnhancements2() && Op.getOpcode() == SystemZISD::LRV) 5038 return true; 5039 return false; 5040 } 5041 5042 // Combine GPR scalar values Elems into a vector of type VT. 5043 SDValue 5044 SystemZTargetLowering::buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 5045 SmallVectorImpl<SDValue> &Elems) const { 5046 // See whether there is a single replicated value. 
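// e.g. <%x, %x, undef, %x> yields Single == %x with Count == 3, while
// <%x, %y, ...> resets Single to null and stops the scan.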
5047 SDValue Single; 5048 unsigned int NumElements = Elems.size(); 5049 unsigned int Count = 0; 5050 for (auto Elem : Elems) { 5051 if (!Elem.isUndef()) { 5052 if (!Single.getNode()) 5053 Single = Elem; 5054 else if (Elem != Single) { 5055 Single = SDValue(); 5056 break; 5057 } 5058 Count += 1; 5059 } 5060 } 5061 // There are three cases here: 5062 // 5063 // - if the only defined element is a loaded one, the best sequence 5064 // is a replicating load. 5065 // 5066 // - otherwise, if the only defined element is an i64 value, we will 5067 // end up with the same VLVGP sequence regardless of whether we short-cut 5068 // for replication or fall through to the later code. 5069 // 5070 // - otherwise, if the only defined element is an i32 or smaller value, 5071 // we would need 2 instructions to replicate it: VLVGP followed by VREPx. 5072 // This is only a win if the single defined element is used more than once. 5073 // In other cases we're better off using a single VLVGx. 5074 if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single))) 5075 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single); 5076 5077 // If all elements are loads, use VLREP/VLEs (below). 5078 bool AllLoads = true; 5079 for (auto Elem : Elems) 5080 if (!isVectorElementLoad(Elem)) { 5081 AllLoads = false; 5082 break; 5083 } 5084 5085 // The best way of building a v2i64 from two i64s is to use VLVGP. 5086 if (VT == MVT::v2i64 && !AllLoads) 5087 return joinDwords(DAG, DL, Elems[0], Elems[1]); 5088 5089 // Use a 64-bit merge high to combine two doubles. 5090 if (VT == MVT::v2f64 && !AllLoads) 5091 return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); 5092 5093 // Build v4f32 values directly from the FPRs: 5094 // 5095 // <Axxx> <Bxxx> <Cxxxx> <Dxxx> 5096 // V V VMRHF 5097 // <ABxx> <CDxx> 5098 // V VMRHG 5099 // <ABCD> 5100 if (VT == MVT::v4f32 && !AllLoads) { 5101 SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); 5102 SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]); 5103 // Avoid unnecessary undefs by reusing the other operand. 5104 if (Op01.isUndef()) 5105 Op01 = Op23; 5106 else if (Op23.isUndef()) 5107 Op23 = Op01; 5108 // Merging identical replications is a no-op. 5109 if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23) 5110 return Op01; 5111 Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01); 5112 Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23); 5113 SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH, 5114 DL, MVT::v2i64, Op01, Op23); 5115 return DAG.getNode(ISD::BITCAST, DL, VT, Op); 5116 } 5117 5118 // Collect the constant terms. 5119 SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue()); 5120 SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false); 5121 5122 unsigned NumConstants = 0; 5123 for (unsigned I = 0; I < NumElements; ++I) { 5124 SDValue Elem = Elems[I]; 5125 if (Elem.getOpcode() == ISD::Constant || 5126 Elem.getOpcode() == ISD::ConstantFP) { 5127 NumConstants += 1; 5128 Constants[I] = Elem; 5129 Done[I] = true; 5130 } 5131 } 5132 // If there was at least one constant, fill in the other elements of 5133 // Constants with undefs to get a full vector constant and use that 5134 // as the starting point. 
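// For illustration (assumed elements): for <4 x i32> <%a, 1, undef, 2> the
// starting constant is <undef, 1, undef, 2>; %a is then inserted by the
// INSERT_VECTOR_ELT loop at the end (VLVGF for an i32 element).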
5135 SDValue Result; 5136 SDValue ReplicatedVal; 5137 if (NumConstants > 0) { 5138 for (unsigned I = 0; I < NumElements; ++I) 5139 if (!Constants[I].getNode()) 5140 Constants[I] = DAG.getUNDEF(Elems[I].getValueType()); 5141 Result = DAG.getBuildVector(VT, DL, Constants); 5142 } else { 5143 // Otherwise try to use VLREP or VLVGP to start the sequence in order to 5144 // avoid a false dependency on any previous contents of the vector 5145 // register. 5146 5147 // Use a VLREP if at least one element is a load. Make sure to replicate 5148 // the load with the most elements having its value. 5149 std::map<const SDNode*, unsigned> UseCounts; 5150 SDNode *LoadMaxUses = nullptr; 5151 for (unsigned I = 0; I < NumElements; ++I) 5152 if (isVectorElementLoad(Elems[I])) { 5153 SDNode *Ld = Elems[I].getNode(); 5154 UseCounts[Ld]++; 5155 if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld]) 5156 LoadMaxUses = Ld; 5157 } 5158 if (LoadMaxUses != nullptr) { 5159 ReplicatedVal = SDValue(LoadMaxUses, 0); 5160 Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal); 5161 } else { 5162 // Try to use VLVGP. 5163 unsigned I1 = NumElements / 2 - 1; 5164 unsigned I2 = NumElements - 1; 5165 bool Def1 = !Elems[I1].isUndef(); 5166 bool Def2 = !Elems[I2].isUndef(); 5167 if (Def1 || Def2) { 5168 SDValue Elem1 = Elems[Def1 ? I1 : I2]; 5169 SDValue Elem2 = Elems[Def2 ? I2 : I1]; 5170 Result = DAG.getNode(ISD::BITCAST, DL, VT, 5171 joinDwords(DAG, DL, Elem1, Elem2)); 5172 Done[I1] = true; 5173 Done[I2] = true; 5174 } else 5175 Result = DAG.getUNDEF(VT); 5176 } 5177 } 5178 5179 // Use VLVGx to insert the other elements. 5180 for (unsigned I = 0; I < NumElements; ++I) 5181 if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal) 5182 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I], 5183 DAG.getConstant(I, DL, MVT::i32)); 5184 return Result; 5185 } 5186 5187 SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op, 5188 SelectionDAG &DAG) const { 5189 auto *BVN = cast<BuildVectorSDNode>(Op.getNode()); 5190 SDLoc DL(Op); 5191 EVT VT = Op.getValueType(); 5192 5193 if (BVN->isConstant()) { 5194 if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget)) 5195 return Op; 5196 5197 // Fall back to loading it from memory. 5198 return SDValue(); 5199 } 5200 5201 // See if we should use shuffles to construct the vector from other vectors. 5202 if (SDValue Res = tryBuildVectorShuffle(DAG, BVN)) 5203 return Res; 5204 5205 // Detect SCALAR_TO_VECTOR conversions. 5206 if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op)) 5207 return buildScalarToVector(DAG, DL, VT, Op.getOperand(0)); 5208 5209 // Otherwise use buildVector to build the vector up from GPRs. 
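// e.g. a non-constant <2 x i64> <%a, %b> with no loaded elements falls
// through to buildVector and becomes a single VLVGP via joinDwords.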
5210 unsigned NumElements = Op.getNumOperands(); 5211 SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements); 5212 for (unsigned I = 0; I < NumElements; ++I) 5213 Ops[I] = Op.getOperand(I); 5214 return buildVector(DAG, DL, VT, Ops); 5215 } 5216 5217 SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, 5218 SelectionDAG &DAG) const { 5219 auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode()); 5220 SDLoc DL(Op); 5221 EVT VT = Op.getValueType(); 5222 unsigned NumElements = VT.getVectorNumElements(); 5223 5224 if (VSN->isSplat()) { 5225 SDValue Op0 = Op.getOperand(0); 5226 unsigned Index = VSN->getSplatIndex(); 5227 assert(Index < VT.getVectorNumElements() && 5228 "Splat index should be defined and in first operand"); 5229 // See whether the value we're splatting is directly available as a scalar. 5230 if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) || 5231 Op0.getOpcode() == ISD::BUILD_VECTOR) 5232 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index)); 5233 // Otherwise keep it as a vector-to-vector operation. 5234 return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0), 5235 DAG.getTargetConstant(Index, DL, MVT::i32)); 5236 } 5237 5238 GeneralShuffle GS(VT); 5239 for (unsigned I = 0; I < NumElements; ++I) { 5240 int Elt = VSN->getMaskElt(I); 5241 if (Elt < 0) 5242 GS.addUndef(); 5243 else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements), 5244 unsigned(Elt) % NumElements)) 5245 return SDValue(); 5246 } 5247 return GS.getNode(DAG, SDLoc(VSN)); 5248 } 5249 5250 SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op, 5251 SelectionDAG &DAG) const { 5252 SDLoc DL(Op); 5253 // Just insert the scalar into element 0 of an undefined vector. 5254 return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, 5255 Op.getValueType(), DAG.getUNDEF(Op.getValueType()), 5256 Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32)); 5257 } 5258 5259 SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 5260 SelectionDAG &DAG) const { 5261 // Handle insertions of floating-point values. 5262 SDLoc DL(Op); 5263 SDValue Op0 = Op.getOperand(0); 5264 SDValue Op1 = Op.getOperand(1); 5265 SDValue Op2 = Op.getOperand(2); 5266 EVT VT = Op.getValueType(); 5267 5268 // Insertions into constant indices of a v2f64 can be done using VPDI. 5269 // However, if the inserted value is a bitcast or a constant then it's 5270 // better to use GPRs, as below. 5271 if (VT == MVT::v2f64 && 5272 Op1.getOpcode() != ISD::BITCAST && 5273 Op1.getOpcode() != ISD::ConstantFP && 5274 Op2.getOpcode() == ISD::Constant) { 5275 uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue(); 5276 unsigned Mask = VT.getVectorNumElements() - 1; 5277 if (Index <= Mask) 5278 return Op; 5279 } 5280 5281 // Otherwise bitcast to the equivalent integer form and insert via a GPR. 5282 MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits()); 5283 MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements()); 5284 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT, 5285 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), 5286 DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2); 5287 return DAG.getNode(ISD::BITCAST, DL, VT, Res); 5288 } 5289 5290 SDValue 5291 SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 5292 SelectionDAG &DAG) const { 5293 // Handle extractions of floating-point values. 
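// e.g. extracting element 1 of a v2f64 by constant index is returned as-is
// for instruction selection, while a variable index takes the integer path
// below and extracts via a GPR from the bitcast v2i64.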
5294 SDLoc DL(Op); 5295 SDValue Op0 = Op.getOperand(0); 5296 SDValue Op1 = Op.getOperand(1); 5297 EVT VT = Op.getValueType(); 5298 EVT VecVT = Op0.getValueType(); 5299 5300 // Extractions of constant indices can be done directly. 5301 if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) { 5302 uint64_t Index = CIndexN->getZExtValue(); 5303 unsigned Mask = VecVT.getVectorNumElements() - 1; 5304 if (Index <= Mask) 5305 return Op; 5306 } 5307 5308 // Otherwise bitcast to the equivalent integer form and extract via a GPR. 5309 MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits()); 5310 MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements()); 5311 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT, 5312 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1); 5313 return DAG.getNode(ISD::BITCAST, DL, VT, Res); 5314 } 5315 5316 SDValue SystemZTargetLowering:: 5317 lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const { 5318 SDValue PackedOp = Op.getOperand(0); 5319 EVT OutVT = Op.getValueType(); 5320 EVT InVT = PackedOp.getValueType(); 5321 unsigned ToBits = OutVT.getScalarSizeInBits(); 5322 unsigned FromBits = InVT.getScalarSizeInBits(); 5323 do { 5324 FromBits *= 2; 5325 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits), 5326 SystemZ::VectorBits / FromBits); 5327 PackedOp = 5328 DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(PackedOp), OutVT, PackedOp); 5329 } while (FromBits != ToBits); 5330 return PackedOp; 5331 } 5332 5333 // Lower a ZERO_EXTEND_VECTOR_INREG to a vector shuffle with a zero vector. 5334 SDValue SystemZTargetLowering:: 5335 lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const { 5336 SDValue PackedOp = Op.getOperand(0); 5337 SDLoc DL(Op); 5338 EVT OutVT = Op.getValueType(); 5339 EVT InVT = PackedOp.getValueType(); 5340 unsigned InNumElts = InVT.getVectorNumElements(); 5341 unsigned OutNumElts = OutVT.getVectorNumElements(); 5342 unsigned NumInPerOut = InNumElts / OutNumElts; 5343 5344 SDValue ZeroVec = 5345 DAG.getSplatVector(InVT, DL, DAG.getConstant(0, DL, InVT.getScalarType())); 5346 5347 SmallVector<int, 16> Mask(InNumElts); 5348 unsigned ZeroVecElt = InNumElts; 5349 for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) { 5350 unsigned MaskElt = PackedElt * NumInPerOut; 5351 unsigned End = MaskElt + NumInPerOut - 1; 5352 for (; MaskElt < End; MaskElt++) 5353 Mask[MaskElt] = ZeroVecElt++; 5354 Mask[MaskElt] = PackedElt; 5355 } 5356 SDValue Shuf = DAG.getVectorShuffle(InVT, DL, PackedOp, ZeroVec, Mask); 5357 return DAG.getNode(ISD::BITCAST, DL, OutVT, Shuf); 5358 } 5359 5360 SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG, 5361 unsigned ByScalar) const { 5362 // Look for cases where a vector shift can use the *_BY_SCALAR form. 5363 SDValue Op0 = Op.getOperand(0); 5364 SDValue Op1 = Op.getOperand(1); 5365 SDLoc DL(Op); 5366 EVT VT = Op.getValueType(); 5367 unsigned ElemBitSize = VT.getScalarSizeInBits(); 5368 5369 // See whether the shift vector is a splat represented as BUILD_VECTOR. 5370 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) { 5371 APInt SplatBits, SplatUndef; 5372 unsigned SplatBitSize; 5373 bool HasAnyUndefs; 5374 // Check for constant splats. Use ElemBitSize as the minimum element 5375 // width and reject splats that need wider elements. 
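// For illustration (assumed operands): (shl <4 x i32> %x, splat(3)) becomes
// VSHL_BY_SCALAR(%x, 3), selectable as VESLF; only the low bits of the
// amount matter, hence the 0xfff mask below.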
5376 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 5377 ElemBitSize, true) && 5378 SplatBitSize == ElemBitSize) { 5379 SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff, 5380 DL, MVT::i32); 5381 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 5382 } 5383 // Check for variable splats. 5384 BitVector UndefElements; 5385 SDValue Splat = BVN->getSplatValue(&UndefElements); 5386 if (Splat) { 5387 // Since i32 is the smallest legal type, we either need a no-op 5388 // or a truncation. 5389 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat); 5390 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 5391 } 5392 } 5393 5394 // See whether the shift vector is a splat represented as SHUFFLE_VECTOR, 5395 // and the shift amount is directly available in a GPR. 5396 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) { 5397 if (VSN->isSplat()) { 5398 SDValue VSNOp0 = VSN->getOperand(0); 5399 unsigned Index = VSN->getSplatIndex(); 5400 assert(Index < VT.getVectorNumElements() && 5401 "Splat index should be defined and in first operand"); 5402 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) || 5403 VSNOp0.getOpcode() == ISD::BUILD_VECTOR) { 5404 // Since i32 is the smallest legal type, we either need a no-op 5405 // or a truncation. 5406 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, 5407 VSNOp0.getOperand(Index)); 5408 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 5409 } 5410 } 5411 } 5412 5413 // Otherwise just treat the current form as legal. 5414 return Op; 5415 } 5416 5417 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 5418 SelectionDAG &DAG) const { 5419 switch (Op.getOpcode()) { 5420 case ISD::FRAMEADDR: 5421 return lowerFRAMEADDR(Op, DAG); 5422 case ISD::RETURNADDR: 5423 return lowerRETURNADDR(Op, DAG); 5424 case ISD::BR_CC: 5425 return lowerBR_CC(Op, DAG); 5426 case ISD::SELECT_CC: 5427 return lowerSELECT_CC(Op, DAG); 5428 case ISD::SETCC: 5429 return lowerSETCC(Op, DAG); 5430 case ISD::STRICT_FSETCC: 5431 return lowerSTRICT_FSETCC(Op, DAG, false); 5432 case ISD::STRICT_FSETCCS: 5433 return lowerSTRICT_FSETCC(Op, DAG, true); 5434 case ISD::GlobalAddress: 5435 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 5436 case ISD::GlobalTLSAddress: 5437 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 5438 case ISD::BlockAddress: 5439 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 5440 case ISD::JumpTable: 5441 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 5442 case ISD::ConstantPool: 5443 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 5444 case ISD::BITCAST: 5445 return lowerBITCAST(Op, DAG); 5446 case ISD::VASTART: 5447 return lowerVASTART(Op, DAG); 5448 case ISD::VACOPY: 5449 return lowerVACOPY(Op, DAG); 5450 case ISD::DYNAMIC_STACKALLOC: 5451 return lowerDYNAMIC_STACKALLOC(Op, DAG); 5452 case ISD::GET_DYNAMIC_AREA_OFFSET: 5453 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 5454 case ISD::SMUL_LOHI: 5455 return lowerSMUL_LOHI(Op, DAG); 5456 case ISD::UMUL_LOHI: 5457 return lowerUMUL_LOHI(Op, DAG); 5458 case ISD::SDIVREM: 5459 return lowerSDIVREM(Op, DAG); 5460 case ISD::UDIVREM: 5461 return lowerUDIVREM(Op, DAG); 5462 case ISD::SADDO: 5463 case ISD::SSUBO: 5464 case ISD::UADDO: 5465 case ISD::USUBO: 5466 return lowerXALUO(Op, DAG); 5467 case ISD::ADDCARRY: 5468 case ISD::SUBCARRY: 5469 return lowerADDSUBCARRY(Op, DAG); 5470 case ISD::OR: 5471 return lowerOR(Op, DAG); 5472 case ISD::CTPOP: 5473 return lowerCTPOP(Op, DAG); 5474 case 
ISD::ATOMIC_FENCE: 5475 return lowerATOMIC_FENCE(Op, DAG); 5476 case ISD::ATOMIC_SWAP: 5477 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); 5478 case ISD::ATOMIC_STORE: 5479 return lowerATOMIC_STORE(Op, DAG); 5480 case ISD::ATOMIC_LOAD: 5481 return lowerATOMIC_LOAD(Op, DAG); 5482 case ISD::ATOMIC_LOAD_ADD: 5483 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 5484 case ISD::ATOMIC_LOAD_SUB: 5485 return lowerATOMIC_LOAD_SUB(Op, DAG); 5486 case ISD::ATOMIC_LOAD_AND: 5487 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 5488 case ISD::ATOMIC_LOAD_OR: 5489 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 5490 case ISD::ATOMIC_LOAD_XOR: 5491 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 5492 case ISD::ATOMIC_LOAD_NAND: 5493 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 5494 case ISD::ATOMIC_LOAD_MIN: 5495 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 5496 case ISD::ATOMIC_LOAD_MAX: 5497 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 5498 case ISD::ATOMIC_LOAD_UMIN: 5499 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 5500 case ISD::ATOMIC_LOAD_UMAX: 5501 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 5502 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 5503 return lowerATOMIC_CMP_SWAP(Op, DAG); 5504 case ISD::STACKSAVE: 5505 return lowerSTACKSAVE(Op, DAG); 5506 case ISD::STACKRESTORE: 5507 return lowerSTACKRESTORE(Op, DAG); 5508 case ISD::PREFETCH: 5509 return lowerPREFETCH(Op, DAG); 5510 case ISD::INTRINSIC_W_CHAIN: 5511 return lowerINTRINSIC_W_CHAIN(Op, DAG); 5512 case ISD::INTRINSIC_WO_CHAIN: 5513 return lowerINTRINSIC_WO_CHAIN(Op, DAG); 5514 case ISD::BUILD_VECTOR: 5515 return lowerBUILD_VECTOR(Op, DAG); 5516 case ISD::VECTOR_SHUFFLE: 5517 return lowerVECTOR_SHUFFLE(Op, DAG); 5518 case ISD::SCALAR_TO_VECTOR: 5519 return lowerSCALAR_TO_VECTOR(Op, DAG); 5520 case ISD::INSERT_VECTOR_ELT: 5521 return lowerINSERT_VECTOR_ELT(Op, DAG); 5522 case ISD::EXTRACT_VECTOR_ELT: 5523 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 5524 case ISD::SIGN_EXTEND_VECTOR_INREG: 5525 return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG); 5526 case ISD::ZERO_EXTEND_VECTOR_INREG: 5527 return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG); 5528 case ISD::SHL: 5529 return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR); 5530 case ISD::SRL: 5531 return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR); 5532 case ISD::SRA: 5533 return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR); 5534 default: 5535 llvm_unreachable("Unexpected node to lower"); 5536 } 5537 } 5538 5539 // Lower operations with invalid operand or result types (currently used 5540 // only for 128-bit integer types). 
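// For illustration: an i128 ATOMIC_LOAD below becomes an ATOMIC_LOAD_128
// node that produces an untyped GR128 register pair plus a chain;
// lowerGR128ToI128 then reassembles the pair into an i128 for the rest of
// the DAG.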
5541 void 5542 SystemZTargetLowering::LowerOperationWrapper(SDNode *N, 5543 SmallVectorImpl<SDValue> &Results, 5544 SelectionDAG &DAG) const { 5545 switch (N->getOpcode()) { 5546 case ISD::ATOMIC_LOAD: { 5547 SDLoc DL(N); 5548 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other); 5549 SDValue Ops[] = { N->getOperand(0), N->getOperand(1) }; 5550 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 5551 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128, 5552 DL, Tys, Ops, MVT::i128, MMO); 5553 Results.push_back(lowerGR128ToI128(DAG, Res)); 5554 Results.push_back(Res.getValue(1)); 5555 break; 5556 } 5557 case ISD::ATOMIC_STORE: { 5558 SDLoc DL(N); 5559 SDVTList Tys = DAG.getVTList(MVT::Other); 5560 SDValue Ops[] = { N->getOperand(0), 5561 lowerI128ToGR128(DAG, N->getOperand(2)), 5562 N->getOperand(1) }; 5563 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 5564 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128, 5565 DL, Tys, Ops, MVT::i128, MMO); 5566 // We have to enforce sequential consistency by performing a 5567 // serialization operation after the store. 5568 if (cast<AtomicSDNode>(N)->getSuccessOrdering() == 5569 AtomicOrdering::SequentiallyConsistent) 5570 Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, 5571 MVT::Other, Res), 0); 5572 Results.push_back(Res); 5573 break; 5574 } 5575 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: { 5576 SDLoc DL(N); 5577 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other); 5578 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 5579 lowerI128ToGR128(DAG, N->getOperand(2)), 5580 lowerI128ToGR128(DAG, N->getOperand(3)) }; 5581 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 5582 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128, 5583 DL, Tys, Ops, MVT::i128, MMO); 5584 SDValue Success = emitSETCC(DAG, DL, Res.getValue(1), 5585 SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); 5586 Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1)); 5587 Results.push_back(lowerGR128ToI128(DAG, Res)); 5588 Results.push_back(Success); 5589 Results.push_back(Res.getValue(2)); 5590 break; 5591 } 5592 default: 5593 llvm_unreachable("Unexpected node to lower"); 5594 } 5595 } 5596 5597 void 5598 SystemZTargetLowering::ReplaceNodeResults(SDNode *N, 5599 SmallVectorImpl<SDValue> &Results, 5600 SelectionDAG &DAG) const { 5601 return LowerOperationWrapper(N, Results, DAG); 5602 } 5603 5604 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 5605 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 5606 switch ((SystemZISD::NodeType)Opcode) { 5607 case SystemZISD::FIRST_NUMBER: break; 5608 OPCODE(RET_FLAG); 5609 OPCODE(CALL); 5610 OPCODE(SIBCALL); 5611 OPCODE(TLS_GDCALL); 5612 OPCODE(TLS_LDCALL); 5613 OPCODE(PCREL_WRAPPER); 5614 OPCODE(PCREL_OFFSET); 5615 OPCODE(ICMP); 5616 OPCODE(FCMP); 5617 OPCODE(STRICT_FCMP); 5618 OPCODE(STRICT_FCMPS); 5619 OPCODE(TM); 5620 OPCODE(BR_CCMASK); 5621 OPCODE(SELECT_CCMASK); 5622 OPCODE(ADJDYNALLOC); 5623 OPCODE(PROBED_ALLOCA); 5624 OPCODE(POPCNT); 5625 OPCODE(SMUL_LOHI); 5626 OPCODE(UMUL_LOHI); 5627 OPCODE(SDIVREM); 5628 OPCODE(UDIVREM); 5629 OPCODE(SADDO); 5630 OPCODE(SSUBO); 5631 OPCODE(UADDO); 5632 OPCODE(USUBO); 5633 OPCODE(ADDCARRY); 5634 OPCODE(SUBCARRY); 5635 OPCODE(GET_CCMASK); 5636 OPCODE(MVC); 5637 OPCODE(MVC_LOOP); 5638 OPCODE(NC); 5639 OPCODE(NC_LOOP); 5640 OPCODE(OC); 5641 OPCODE(OC_LOOP); 5642 OPCODE(XC); 5643 OPCODE(XC_LOOP); 5644 OPCODE(CLC); 5645 OPCODE(CLC_LOOP); 5646 
OPCODE(STPCPY); 5647 OPCODE(STRCMP); 5648 OPCODE(SEARCH_STRING); 5649 OPCODE(IPM); 5650 OPCODE(MEMBARRIER); 5651 OPCODE(TBEGIN); 5652 OPCODE(TBEGIN_NOFLOAT); 5653 OPCODE(TEND); 5654 OPCODE(BYTE_MASK); 5655 OPCODE(ROTATE_MASK); 5656 OPCODE(REPLICATE); 5657 OPCODE(JOIN_DWORDS); 5658 OPCODE(SPLAT); 5659 OPCODE(MERGE_HIGH); 5660 OPCODE(MERGE_LOW); 5661 OPCODE(SHL_DOUBLE); 5662 OPCODE(PERMUTE_DWORDS); 5663 OPCODE(PERMUTE); 5664 OPCODE(PACK); 5665 OPCODE(PACKS_CC); 5666 OPCODE(PACKLS_CC); 5667 OPCODE(UNPACK_HIGH); 5668 OPCODE(UNPACKL_HIGH); 5669 OPCODE(UNPACK_LOW); 5670 OPCODE(UNPACKL_LOW); 5671 OPCODE(VSHL_BY_SCALAR); 5672 OPCODE(VSRL_BY_SCALAR); 5673 OPCODE(VSRA_BY_SCALAR); 5674 OPCODE(VSUM); 5675 OPCODE(VICMPE); 5676 OPCODE(VICMPH); 5677 OPCODE(VICMPHL); 5678 OPCODE(VICMPES); 5679 OPCODE(VICMPHS); 5680 OPCODE(VICMPHLS); 5681 OPCODE(VFCMPE); 5682 OPCODE(STRICT_VFCMPE); 5683 OPCODE(STRICT_VFCMPES); 5684 OPCODE(VFCMPH); 5685 OPCODE(STRICT_VFCMPH); 5686 OPCODE(STRICT_VFCMPHS); 5687 OPCODE(VFCMPHE); 5688 OPCODE(STRICT_VFCMPHE); 5689 OPCODE(STRICT_VFCMPHES); 5690 OPCODE(VFCMPES); 5691 OPCODE(VFCMPHS); 5692 OPCODE(VFCMPHES); 5693 OPCODE(VFTCI); 5694 OPCODE(VEXTEND); 5695 OPCODE(STRICT_VEXTEND); 5696 OPCODE(VROUND); 5697 OPCODE(STRICT_VROUND); 5698 OPCODE(VTM); 5699 OPCODE(VFAE_CC); 5700 OPCODE(VFAEZ_CC); 5701 OPCODE(VFEE_CC); 5702 OPCODE(VFEEZ_CC); 5703 OPCODE(VFENE_CC); 5704 OPCODE(VFENEZ_CC); 5705 OPCODE(VISTR_CC); 5706 OPCODE(VSTRC_CC); 5707 OPCODE(VSTRCZ_CC); 5708 OPCODE(VSTRS_CC); 5709 OPCODE(VSTRSZ_CC); 5710 OPCODE(TDC); 5711 OPCODE(ATOMIC_SWAPW); 5712 OPCODE(ATOMIC_LOADW_ADD); 5713 OPCODE(ATOMIC_LOADW_SUB); 5714 OPCODE(ATOMIC_LOADW_AND); 5715 OPCODE(ATOMIC_LOADW_OR); 5716 OPCODE(ATOMIC_LOADW_XOR); 5717 OPCODE(ATOMIC_LOADW_NAND); 5718 OPCODE(ATOMIC_LOADW_MIN); 5719 OPCODE(ATOMIC_LOADW_MAX); 5720 OPCODE(ATOMIC_LOADW_UMIN); 5721 OPCODE(ATOMIC_LOADW_UMAX); 5722 OPCODE(ATOMIC_CMP_SWAPW); 5723 OPCODE(ATOMIC_CMP_SWAP); 5724 OPCODE(ATOMIC_LOAD_128); 5725 OPCODE(ATOMIC_STORE_128); 5726 OPCODE(ATOMIC_CMP_SWAP_128); 5727 OPCODE(LRV); 5728 OPCODE(STRV); 5729 OPCODE(VLER); 5730 OPCODE(VSTER); 5731 OPCODE(PREFETCH); 5732 } 5733 return nullptr; 5734 #undef OPCODE 5735 } 5736 5737 // Return true if VT is a vector whose elements are a whole number of bytes 5738 // in width. Also check for presence of vector support. 5739 bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const { 5740 if (!Subtarget.hasVector()) 5741 return false; 5742 5743 return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple(); 5744 } 5745 5746 // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT 5747 // producing a result of type ResVT. Op is a possibly bitcast version 5748 // of the input vector and Index is the index (based on type VecVT) that 5749 // should be extracted. Return the new extraction if a simplification 5750 // was possible or if Force is true. 5751 SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT, 5752 EVT VecVT, SDValue Op, 5753 unsigned Index, 5754 DAGCombinerInfo &DCI, 5755 bool Force) const { 5756 SelectionDAG &DAG = DCI.DAG; 5757 5758 // The number of bytes being extracted. 5759 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); 5760 5761 for (;;) { 5762 unsigned Opcode = Op.getOpcode(); 5763 if (Opcode == ISD::BITCAST) 5764 // Look through bitcasts. 
5765 Op = Op.getOperand(0); 5766 else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) && 5767 canTreatAsByteVector(Op.getValueType())) { 5768 // Get a VPERM-like permute mask and see whether the bytes covered 5769 // by the extracted element are a contiguous sequence from one 5770 // source operand. 5771 SmallVector<int, SystemZ::VectorBytes> Bytes; 5772 if (!getVPermMask(Op, Bytes)) 5773 break; 5774 int First; 5775 if (!getShuffleInput(Bytes, Index * BytesPerElement, 5776 BytesPerElement, First)) 5777 break; 5778 if (First < 0) 5779 return DAG.getUNDEF(ResVT); 5780 // Make sure the contiguous sequence starts at a multiple of the 5781 // original element size. 5782 unsigned Byte = unsigned(First) % Bytes.size(); 5783 if (Byte % BytesPerElement != 0) 5784 break; 5785 // We can get the extracted value directly from an input. 5786 Index = Byte / BytesPerElement; 5787 Op = Op.getOperand(unsigned(First) / Bytes.size()); 5788 Force = true; 5789 } else if (Opcode == ISD::BUILD_VECTOR && 5790 canTreatAsByteVector(Op.getValueType())) { 5791 // We can only optimize this case if the BUILD_VECTOR elements are 5792 // at least as wide as the extracted value. 5793 EVT OpVT = Op.getValueType(); 5794 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); 5795 if (OpBytesPerElement < BytesPerElement) 5796 break; 5797 // Make sure that the least-significant bit of the extracted value 5798 // is the least significant bit of an input. 5799 unsigned End = (Index + 1) * BytesPerElement; 5800 if (End % OpBytesPerElement != 0) 5801 break; 5802 // We're extracting the low part of one operand of the BUILD_VECTOR. 5803 Op = Op.getOperand(End / OpBytesPerElement - 1); 5804 if (!Op.getValueType().isInteger()) { 5805 EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits()); 5806 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); 5807 DCI.AddToWorklist(Op.getNode()); 5808 } 5809 EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits()); 5810 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); 5811 if (VT != ResVT) { 5812 DCI.AddToWorklist(Op.getNode()); 5813 Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op); 5814 } 5815 return Op; 5816 } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG || 5817 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG || 5818 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) && 5819 canTreatAsByteVector(Op.getValueType()) && 5820 canTreatAsByteVector(Op.getOperand(0).getValueType())) { 5821 // Make sure that only the unextended bits are significant. 5822 EVT ExtVT = Op.getValueType(); 5823 EVT OpVT = Op.getOperand(0).getValueType(); 5824 unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize(); 5825 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); 5826 unsigned Byte = Index * BytesPerElement; 5827 unsigned SubByte = Byte % ExtBytesPerElement; 5828 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement; 5829 if (SubByte < MinSubByte || 5830 SubByte + BytesPerElement > ExtBytesPerElement) 5831 break; 5832 // Get the byte offset of the unextended element 5833 Byte = Byte / ExtBytesPerElement * OpBytesPerElement; 5834 // ...then add the byte offset relative to that element. 
5835 Byte += SubByte - MinSubByte; 5836 if (Byte % BytesPerElement != 0) 5837 break; 5838 Op = Op.getOperand(0); 5839 Index = Byte / BytesPerElement; 5840 Force = true; 5841 } else 5842 break; 5843 } 5844 if (Force) { 5845 if (Op.getValueType() != VecVT) { 5846 Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op); 5847 DCI.AddToWorklist(Op.getNode()); 5848 } 5849 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op, 5850 DAG.getConstant(Index, DL, MVT::i32)); 5851 } 5852 return SDValue(); 5853 } 5854 5855 // Optimize vector operations in scalar value Op on the basis that Op 5856 // is truncated to TruncVT. 5857 SDValue SystemZTargetLowering::combineTruncateExtract( 5858 const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const { 5859 // If we have (trunc (extract_vector_elt X, Y)), try to turn it into 5860 // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements 5861 // of type TruncVT. 5862 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5863 TruncVT.getSizeInBits() % 8 == 0) { 5864 SDValue Vec = Op.getOperand(0); 5865 EVT VecVT = Vec.getValueType(); 5866 if (canTreatAsByteVector(VecVT)) { 5867 if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 5868 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); 5869 unsigned TruncBytes = TruncVT.getStoreSize(); 5870 if (BytesPerElement % TruncBytes == 0) { 5871 // Calculate the value of Y' in the above description. We are 5872 // splitting the original elements into Scale equal-sized pieces 5873 // and for truncation purposes want the last (least-significant) 5874 // of these pieces for IndexN. This is easiest to do by calculating 5875 // the start index of the following element and then subtracting 1. 5876 unsigned Scale = BytesPerElement / TruncBytes; 5877 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1; 5878 5879 // Defer the creation of the bitcast from X to combineExtract, 5880 // which might be able to optimize the extraction. 5881 VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8), 5882 VecVT.getStoreSize() / TruncBytes); 5883 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT); 5884 return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true); 5885 } 5886 } 5887 } 5888 } 5889 return SDValue(); 5890 } 5891 5892 SDValue SystemZTargetLowering::combineZERO_EXTEND( 5893 SDNode *N, DAGCombinerInfo &DCI) const { 5894 // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2') 5895 SelectionDAG &DAG = DCI.DAG; 5896 SDValue N0 = N->getOperand(0); 5897 EVT VT = N->getValueType(0); 5898 if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) { 5899 auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0)); 5900 auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 5901 if (TrueOp && FalseOp) { 5902 SDLoc DL(N0); 5903 SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT), 5904 DAG.getConstant(FalseOp->getZExtValue(), DL, VT), 5905 N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) }; 5906 SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops); 5907 // If N0 has multiple uses, change other uses as well. 
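// e.g. if the original i32 SELECT_CCMASK also feeds a store, that use is
// rewired to (trunc NewSelect) so that only one select remains.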
5908 if (!N0.hasOneUse()) { 5909 SDValue TruncSelect = 5910 DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect); 5911 DCI.CombineTo(N0.getNode(), TruncSelect); 5912 } 5913 return NewSelect; 5914 } 5915 } 5916 return SDValue(); 5917 } 5918 5919 SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG( 5920 SDNode *N, DAGCombinerInfo &DCI) const { 5921 // Convert (sext_in_reg (setcc LHS, RHS, COND), i1) 5922 // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1) 5923 // into (select_cc LHS, RHS, -1, 0, COND) 5924 SelectionDAG &DAG = DCI.DAG; 5925 SDValue N0 = N->getOperand(0); 5926 EVT VT = N->getValueType(0); 5927 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); 5928 if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND) 5929 N0 = N0.getOperand(0); 5930 if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) { 5931 SDLoc DL(N0); 5932 SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1), 5933 DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT), 5934 N0.getOperand(2) }; 5935 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops); 5936 } 5937 return SDValue(); 5938 } 5939 5940 SDValue SystemZTargetLowering::combineSIGN_EXTEND( 5941 SDNode *N, DAGCombinerInfo &DCI) const { 5942 // Convert (sext (ashr (shl X, C1), C2)) to 5943 // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as 5944 // cheap as narrower ones. 5945 SelectionDAG &DAG = DCI.DAG; 5946 SDValue N0 = N->getOperand(0); 5947 EVT VT = N->getValueType(0); 5948 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) { 5949 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 5950 SDValue Inner = N0.getOperand(0); 5951 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) { 5952 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) { 5953 unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits()); 5954 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; 5955 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; 5956 EVT ShiftVT = N0.getOperand(1).getValueType(); 5957 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, 5958 Inner.getOperand(0)); 5959 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, 5960 DAG.getConstant(NewShlAmt, SDLoc(Inner), 5961 ShiftVT)); 5962 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, 5963 DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT)); 5964 } 5965 } 5966 } 5967 return SDValue(); 5968 } 5969 5970 SDValue SystemZTargetLowering::combineMERGE( 5971 SDNode *N, DAGCombinerInfo &DCI) const { 5972 SelectionDAG &DAG = DCI.DAG; 5973 unsigned Opcode = N->getOpcode(); 5974 SDValue Op0 = N->getOperand(0); 5975 SDValue Op1 = N->getOperand(1); 5976 if (Op0.getOpcode() == ISD::BITCAST) 5977 Op0 = Op0.getOperand(0); 5978 if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 5979 // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF 5980 // for v4f32. 5981 if (Op1 == N->getOperand(0)) 5982 return Op1; 5983 // (z_merge_? 0, X) -> (z_unpackl_? 0, X). 5984 EVT VT = Op1.getValueType(); 5985 unsigned ElemBytes = VT.getVectorElementType().getStoreSize(); 5986 if (ElemBytes <= 4) { 5987 Opcode = (Opcode == SystemZISD::MERGE_HIGH ? 
5988 SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW); 5989 EVT InVT = VT.changeVectorElementTypeToInteger(); 5990 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16), 5991 SystemZ::VectorBytes / ElemBytes / 2); 5992 if (VT != InVT) { 5993 Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1); 5994 DCI.AddToWorklist(Op1.getNode()); 5995 } 5996 SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1); 5997 DCI.AddToWorklist(Op.getNode()); 5998 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); 5999 } 6000 } 6001 return SDValue(); 6002 } 6003 6004 SDValue SystemZTargetLowering::combineLOAD( 6005 SDNode *N, DAGCombinerInfo &DCI) const { 6006 SelectionDAG &DAG = DCI.DAG; 6007 EVT LdVT = N->getValueType(0); 6008 if (LdVT.isVector() || LdVT.isInteger()) 6009 return SDValue(); 6010 // Transform a scalar load that is REPLICATEd as well as having other 6011 // use(s) to the form where the other use(s) use the first element of the 6012 // REPLICATE instead of the load. Otherwise instruction selection will not 6013 // produce a VLREP. Avoid extracting to a GPR, so only do this for floating 6014 // point loads. 6015 6016 SDValue Replicate; 6017 SmallVector<SDNode*, 8> OtherUses; 6018 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 6019 UI != UE; ++UI) { 6020 if (UI->getOpcode() == SystemZISD::REPLICATE) { 6021 if (Replicate) 6022 return SDValue(); // Should never happen 6023 Replicate = SDValue(*UI, 0); 6024 } 6025 else if (UI.getUse().getResNo() == 0) 6026 OtherUses.push_back(*UI); 6027 } 6028 if (!Replicate || OtherUses.empty()) 6029 return SDValue(); 6030 6031 SDLoc DL(N); 6032 SDValue Extract0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, LdVT, 6033 Replicate, DAG.getConstant(0, DL, MVT::i32)); 6034 // Update uses of the loaded Value while preserving old chains. 6035 for (SDNode *U : OtherUses) { 6036 SmallVector<SDValue, 8> Ops; 6037 for (SDValue Op : U->ops()) 6038 Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op); 6039 DAG.UpdateNodeOperands(U, Ops); 6040 } 6041 return SDValue(N, 0); 6042 } 6043 6044 bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const { 6045 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) 6046 return true; 6047 if (Subtarget.hasVectorEnhancements2()) 6048 if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64) 6049 return true; 6050 return false; 6051 } 6052 6053 static bool isVectorElementSwap(ArrayRef<int> M, EVT VT) { 6054 if (!VT.isVector() || !VT.isSimple() || 6055 VT.getSizeInBits() != 128 || 6056 VT.getScalarSizeInBits() % 8 != 0) 6057 return false; 6058 6059 unsigned NumElts = VT.getVectorNumElements(); 6060 for (unsigned i = 0; i < NumElts; ++i) { 6061 if (M[i] < 0) continue; // ignore UNDEF indices 6062 if ((unsigned) M[i] != NumElts - 1 - i) 6063 return false; 6064 } 6065 6066 return true; 6067 } 6068 6069 SDValue SystemZTargetLowering::combineSTORE( 6070 SDNode *N, DAGCombinerInfo &DCI) const { 6071 SelectionDAG &DAG = DCI.DAG; 6072 auto *SN = cast<StoreSDNode>(N); 6073 auto &Op1 = N->getOperand(1); 6074 EVT MemVT = SN->getMemoryVT(); 6075 // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better 6076 // for the extraction to be done on a vMiN value, so that we can use VSTE. 6077 // If X has wider elements then convert it to: 6078 // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z). 
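// For illustration (assumed types): (truncstorei32 (extract_vector_elt
// v2i64 %X, 0), %P) becomes a plain store of element 1 of
// (bitcast %X to v4i32), which is selectable as VSTEF.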
  if (MemVT.isInteger() && SN->isTruncatingStore()) {
    if (SDValue Value =
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());

      // Rewrite the store with the new form of stored value.
      return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
                               SN->getBasePtr(), SN->getMemoryVT(),
                               SN->getMemOperand());
    }
  }
  // Combine STORE (BSWAP) into STRVH/STRV/STRVG/VSTBR
  if (!SN->isTruncatingStore() &&
      Op1.getOpcode() == ISD::BSWAP &&
      Op1.getNode()->hasOneUse() &&
      canLoadStoreByteSwapped(Op1.getValueType())) {

    SDValue BSwapOp = Op1.getOperand(0);

    if (BSwapOp.getValueType() == MVT::i16)
      BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);

    SDValue Ops[] = {
      N->getOperand(0), BSwapOp, N->getOperand(2)
    };

    return
      DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N),
                              DAG.getVTList(MVT::Other),
                              Ops, MemVT, SN->getMemOperand());
  }
  // Combine STORE (element-swap) into VSTER
  if (!SN->isTruncatingStore() &&
      Op1.getOpcode() == ISD::VECTOR_SHUFFLE &&
      Op1.getNode()->hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
    ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op1.getNode());
    ArrayRef<int> ShuffleMask = SVN->getMask();
    if (isVectorElementSwap(ShuffleMask, Op1.getValueType())) {
      SDValue Ops[] = {
        N->getOperand(0), Op1.getOperand(0), N->getOperand(2)
      };

      return DAG.getMemIntrinsicNode(SystemZISD::VSTER, SDLoc(N),
                                     DAG.getVTList(MVT::Other),
                                     Ops, MemVT, SN->getMemOperand());
    }
  }

  return SDValue();
}

SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine element-swap (LOAD) into VLER
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
    ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    ArrayRef<int> ShuffleMask = SVN->getMask();
    if (isVectorElementSwap(ShuffleMask, N->getValueType(0))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);

      // Create the element-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr()   // Ptr
      };
      SDValue ESLoad =
        DAG.getMemIntrinsicNode(SystemZISD::VLER, SDLoc(N),
                                DAG.getVTList(LD->getValueType(0), MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // First, combine the VECTOR_SHUFFLE away.  This makes the value produced
      // by the load dead.
      DCI.CombineTo(N, ESLoad);

      // Next, combine the load away; we give it a bogus result value but a
      // real chain result.  The result value is dead because the shuffle is
      // dead.
      DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  if (!Subtarget.hasVector())
    return SDValue();

  // Look through bitcasts that retain the number of vector elements.
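  // E.g. a bitcast from v4f32 to v4i32 is transparent here, but a bitcast
  // from v2i64 to v4i32 is not, since the extraction index would then refer
  // to different bits.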
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
      Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

  // Pull BSWAP out of a vector extraction.
  if (Op.getOpcode() == ISD::BSWAP && Op.hasOneUse()) {
    EVT VecVT = Op.getValueType();
    EVT EltVT = VecVT.getVectorElementType();
    Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), EltVT,
                     Op.getOperand(0), N->getOperand(1));
    DCI.AddToWorklist(Op.getNode());
    Op = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Op);
    if (EltVT != N->getValueType(0)) {
      DCI.AddToWorklist(Op.getNode());
      Op = DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Op);
    }
    return Op;
  }

  // Try to simplify a vector extraction.
  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
    SDValue Op0 = N->getOperand(0);
    EVT VecVT = Op0.getValueType();
    return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                          IndexN->getZExtValue(), DCI, false);
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // (join_dwords X, X) == (replicate X)
  if (N->getOperand(0) == N->getOperand(1))
    return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
                       N->getOperand(0));
  return SDValue();
}

static SDValue MergeInputChains(SDNode *N1, SDNode *N2) {
  SDValue Chain1 = N1->getOperand(0);
  SDValue Chain2 = N2->getOperand(0);

  // Trivial case: both nodes take the same chain.
  if (Chain1 == Chain2)
    return Chain1;

  // FIXME - we could handle more complex cases via TokenFactor,
  // assuming we can verify that this would not create a cycle.
  return SDValue();
}

SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())
    return SDValue();

  // (fpround (extract_vector_elt X 0))
  // (fpround (extract_vector_elt X 1)) ->
  //   (extract_vector_elt (VROUND X) 0)
  //   (extract_vector_elt (VROUND X) 2)
  //
  // This is a special case since the target doesn't really support v2f32s.
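  // VROUND rounds both f64 elements at once and leaves the usable f32
  // results in elements 0 and 2 of the v4f32, which is why the rewritten
  // extractions below use those two indices.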
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(OpNo);
  if (N->getValueType(0) == MVT::f32 &&
      Op0.hasOneUse() &&
      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Op0.getOperand(0).getValueType() == MVT::v2f64 &&
      Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
    SDValue Vec = Op0.getOperand(0);
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() &&
          U->hasOneUse() &&
          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          U->getOperand(0) == Vec &&
          U->getOperand(1).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
        SDValue OtherRound = SDValue(*U->use_begin(), 0);
        if (OtherRound.getOpcode() == N->getOpcode() &&
            OtherRound.getOperand(OpNo) == SDValue(U, 0) &&
            OtherRound.getValueType() == MVT::f32) {
          SDValue VRound, Chain;
          if (N->isStrictFPOpcode()) {
            Chain = MergeInputChains(N, OtherRound.getNode());
            if (!Chain)
              continue;
            VRound = DAG.getNode(SystemZISD::STRICT_VROUND, SDLoc(N),
                                 {MVT::v4f32, MVT::Other}, {Chain, Vec});
            Chain = VRound.getValue(1);
          } else
            VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
                                 MVT::v4f32, Vec);
          DCI.AddToWorklist(VRound.getNode());
          SDValue Extract1 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
                        VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
          DCI.AddToWorklist(Extract1.getNode());
          DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
          if (Chain)
            DAG.ReplaceAllUsesOfValueWith(OtherRound.getValue(1), Chain);
          SDValue Extract0 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
                        VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
          if (Chain)
            return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op0),
                               N->getVTList(), Extract0, Chain);
          return Extract0;
        }
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())
    return SDValue();

  // (fpextend (extract_vector_elt X 0))
  // (fpextend (extract_vector_elt X 2)) ->
  //   (extract_vector_elt (VEXTEND X) 0)
  //   (extract_vector_elt (VEXTEND X) 1)
  //
  // This is a special case since the target doesn't really support v2f32s.
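  // For the strict variants the two nodes must also have chains that
  // MergeInputChains can combine (currently only the trivial case of an
  // identical input chain); otherwise the pair is left alone.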
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(OpNo);
  if (N->getValueType(0) == MVT::f64 &&
      Op0.hasOneUse() &&
      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Op0.getOperand(0).getValueType() == MVT::v4f32 &&
      Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
    SDValue Vec = Op0.getOperand(0);
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() &&
          U->hasOneUse() &&
          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          U->getOperand(0) == Vec &&
          U->getOperand(1).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
        SDValue OtherExtend = SDValue(*U->use_begin(), 0);
        if (OtherExtend.getOpcode() == N->getOpcode() &&
            OtherExtend.getOperand(OpNo) == SDValue(U, 0) &&
            OtherExtend.getValueType() == MVT::f64) {
          SDValue VExtend, Chain;
          if (N->isStrictFPOpcode()) {
            Chain = MergeInputChains(N, OtherExtend.getNode());
            if (!Chain)
              continue;
            VExtend = DAG.getNode(SystemZISD::STRICT_VEXTEND, SDLoc(N),
                                  {MVT::v2f64, MVT::Other}, {Chain, Vec});
            Chain = VExtend.getValue(1);
          } else
            VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N),
                                  MVT::v2f64, Vec);
          DCI.AddToWorklist(VExtend.getNode());
          SDValue Extract1 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64,
                        VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32));
          DCI.AddToWorklist(Extract1.getNode());
          DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1);
          if (Chain)
            DAG.ReplaceAllUsesOfValueWith(OtherExtend.getValue(1), Chain);
          SDValue Extract0 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64,
                        VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
          if (Chain)
            return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op0),
                               N->getVTList(), Extract0, Chain);
          return Extract0;
        }
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineINT_TO_FP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (DCI.Level != BeforeLegalizeTypes)
    return SDValue();
  unsigned Opcode = N->getOpcode();
  EVT OutVT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op = N->getOperand(0);
  unsigned OutScalarBits = OutVT.getScalarSizeInBits();
  unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits();

  // Insert an extension before type-legalization to avoid scalarization, e.g.:
  //   v2f64 = uint_to_fp v2i16
  //   =>
  //   v2f64 = uint_to_fp (v2i64 zero_extend v2i16)
  if (OutVT.isVector() && OutScalarBits > InScalarBits) {
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(OutVT.getScalarSizeInBits()),
                                 OutVT.getVectorNumElements());
    unsigned ExtOpcode =
      (Opcode == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND);
    SDValue ExtOp = DAG.getNode(ExtOpcode, SDLoc(N), ExtVT, Op);
    return DAG.getNode(Opcode, SDLoc(N), OutVT, ExtOp);
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine BSWAP (LOAD) into LRVH/LRV/LRVG/VLBR
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {
    SDValue Load = N->getOperand(0);
    LoadSDNode *LD = cast<LoadSDNode>(Load);

    // Create the byte-swapping load.
    SDValue Ops[] = {
      LD->getChain(),    // Chain
      LD->getBasePtr()   // Ptr
    };
    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
      LoadVT = MVT::i32;
    SDValue BSLoad =
      DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
                              DAG.getVTList(LoadVT, MVT::Other),
                              Ops, LD->getMemoryVT(), LD->getMemOperand());

    // If this is an i16 load, insert the truncate.
    SDValue ResVal = BSLoad;
    if (N->getValueType(0) == MVT::i16)
      ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);

    // First, combine the bswap away.  This makes the value produced by the
    // load dead.
    DCI.CombineTo(N, ResVal);

    // Next, combine the load away; we give it a bogus result value but a real
    // chain result.  The result value is dead because the bswap is dead.
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

    // Return N so it doesn't get rechecked!
    return SDValue(N, 0);
  }

  // Look through bitcasts that retain the number of vector elements.
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
      Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

  // Push BSWAP into a vector insertion if at least one side then simplifies.
  if (Op.getOpcode() == ISD::INSERT_VECTOR_ELT && Op.hasOneUse()) {
    SDValue Vec = Op.getOperand(0);
    SDValue Elt = Op.getOperand(1);
    SDValue Idx = Op.getOperand(2);

    if (DAG.isConstantIntBuildVectorOrConstantInt(Vec) ||
        Vec.getOpcode() == ISD::BSWAP || Vec.isUndef() ||
        DAG.isConstantIntBuildVectorOrConstantInt(Elt) ||
        Elt.getOpcode() == ISD::BSWAP || Elt.isUndef() ||
        (canLoadStoreByteSwapped(N->getValueType(0)) &&
         ISD::isNON_EXTLoad(Elt.getNode()) && Elt.hasOneUse())) {
      EVT VecVT = N->getValueType(0);
      EVT EltVT = N->getValueType(0).getVectorElementType();
      if (VecVT != Vec.getValueType()) {
        Vec = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Vec);
        DCI.AddToWorklist(Vec.getNode());
      }
      if (EltVT != Elt.getValueType()) {
        Elt = DAG.getNode(ISD::BITCAST, SDLoc(N), EltVT, Elt);
        DCI.AddToWorklist(Elt.getNode());
      }
      Vec = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Vec);
      DCI.AddToWorklist(Vec.getNode());
      Elt = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Elt);
      DCI.AddToWorklist(Elt.getNode());
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VecVT,
                         Vec, Elt, Idx);
    }
  }

  // Push BSWAP into a vector shuffle if at least one side then simplifies.
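  // E.g. if one shuffle operand is itself a BSWAP, the two byte swaps
  // cancel out, and constant or undef operands can be byte-swapped for
  // free.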
  ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(Op);
  if (SV && Op.hasOneUse()) {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
        Op0.getOpcode() == ISD::BSWAP || Op0.isUndef() ||
        DAG.isConstantIntBuildVectorOrConstantInt(Op1) ||
        Op1.getOpcode() == ISD::BSWAP || Op1.isUndef()) {
      EVT VecVT = N->getValueType(0);
      if (VecVT != Op0.getValueType()) {
        Op0 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op0);
        DCI.AddToWorklist(Op0.getNode());
      }
      if (VecVT != Op1.getValueType()) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      Op0 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op0);
      DCI.AddToWorklist(Op0.getNode());
      Op1 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op1);
      DCI.AddToWorklist(Op1.getNode());
      return DAG.getVectorShuffle(VecVT, SDLoc(N), Op0, Op1, SV->getMask());
    }
  }

  return SDValue();
}

static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
  // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
  // set by the CCReg instruction using the CCValid / CCMask masks.  If
  // the CCReg instruction is itself an ICMP testing the condition
  // code set by some other instruction, see whether we can directly
  // use that condition code.

  // Verify that we have an ICMP against some constant.
  if (CCValid != SystemZ::CCMASK_ICMP)
    return false;
  auto *ICmp = CCReg.getNode();
  if (ICmp->getOpcode() != SystemZISD::ICMP)
    return false;
  auto *CompareLHS = ICmp->getOperand(0).getNode();
  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
  if (!CompareRHS)
    return false;

  // Optimize the case where CompareLHS is a SELECT_CCMASK.
  if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
    // Verify that we have an appropriate mask for an EQ or NE comparison.
    bool Invert = false;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      Invert = !Invert;
    else if (CCMask != SystemZ::CCMASK_CMP_EQ)
      return false;

    // Verify that the ICMP compares against one of the select values.
    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
    if (!TrueVal)
      return false;
    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!FalseVal)
      return false;
    if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
      Invert = !Invert;
    else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
      return false;

    // Compute the effective CC mask for the new branch or select.
    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
    if (!NewCCValid || !NewCCMask)
      return false;
    CCValid = NewCCValid->getZExtValue();
    CCMask = NewCCMask->getZExtValue();
    if (Invert)
      CCMask ^= CCValid;

    // Return the updated CCReg link.
    CCReg = CompareLHS->getOperand(4);
    return true;
  }

  // Optimize the case where CompareLHS is (SRA (SHL (IPM))).
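  // The shifted IPM result is 0, 1, -2 or -1 for CC values 0, 1, 2 and 3
  // respectively, so a signed comparison against zero sees "less" and
  // "greater" exchanged relative to the original CC; the reverseCCMask
  // call below compensates for that.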
  if (CompareLHS->getOpcode() == ISD::SRA) {
    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!SRACount || SRACount->getZExtValue() != 30)
      return false;
    auto *SHL = CompareLHS->getOperand(0).getNode();
    if (SHL->getOpcode() != ISD::SHL)
      return false;
    auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
    if (!SHLCount || SHLCount->getZExtValue() != 30 - SystemZ::IPM_CC)
      return false;
    auto *IPM = SHL->getOperand(0).getNode();
    if (IPM->getOpcode() != SystemZISD::IPM)
      return false;

    // Avoid introducing CC spills (because SRA would clobber CC).
    if (!CompareLHS->hasOneUse())
      return false;
    // Verify that the ICMP compares against zero.
    if (CompareRHS->getZExtValue() != 0)
      return false;

    // Compute the effective CC mask for the new branch or select.
    CCMask = SystemZ::reverseCCMask(CCMask);

    // Return the updated CCReg link.
    CCReg = IPM->getOperand(0);
    return true;
  }

  return false;
}

SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue Chain = N->getOperand(0);
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
                       Chain,
                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       N->getOperand(3), CCReg);
  return SDValue();
}

SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
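  // E.g. testing whether (select_ccmask 1, 0, Valid, Mask, CC) is nonzero
  // is equivalent to testing CC against Valid/Mask directly, so the
  // intermediate ICMP and SELECT_CCMASK can be folded away.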
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                       N->getOperand(0), N->getOperand(1),
                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       CCReg);
  return SDValue();
}

SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {

  // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();

  SDValue Select = N->getOperand(0);
  if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
    return SDValue();

  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
    return SDValue();
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();

  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
    return SDValue();
  if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0)
    ;
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0)
    SelectCCMaskVal ^= SelectCCValidVal;
  else
    return SDValue();

  if (SelectCCValidVal & ~CCValidVal)
    return SDValue();
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
    return SDValue();

  return Select->getOperand(4);
}

SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  // In the case where the divisor is a vector of constants a cheaper
  // sequence of instructions can replace the divide.  BuildSDIV is called to
  // do this during DAG combining, but it only succeeds when it can build a
  // multiplication node.  The only option for SystemZ is ISD::SMUL_LOHI, and
  // since it is not Legal but Custom it can only happen before legalization.
  // Therefore we must scalarize this early, before the first DAG combine run
  // (Combine 1).  For widened vectors, this is already the result of type
  // legalization.
  if (DCI.Level == BeforeLegalizeTypes && VT.isVector() && isTypeLegal(VT) &&
      DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1)))
    return DAG.UnrollVectorOp(N);
  return SDValue();
}

SDValue SystemZTargetLowering::combineINTRINSIC(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  unsigned Id = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  switch (Id) {
  // VECTOR LOAD (RIGHTMOST) WITH LENGTH with a length operand of 15
  // or larger is simply a vector load.
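  // (The length operand is the highest byte index to be accessed, so a
  // value of 15 or more covers all 16 bytes of a vector register.)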
  case Intrinsic::s390_vll:
  case Intrinsic::s390_vlrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      if (C->getZExtValue() >= 15)
        return DAG.getLoad(N->getValueType(0), SDLoc(N), N->getOperand(0),
                           N->getOperand(3), MachinePointerInfo());
    break;
  // Likewise for VECTOR STORE (RIGHTMOST) WITH LENGTH.
  case Intrinsic::s390_vstl:
  case Intrinsic::s390_vstrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
      if (C->getZExtValue() >= 15)
        return DAG.getStore(N->getOperand(0), SDLoc(N), N->getOperand(2),
                            N->getOperand(4), MachinePointerInfo());
    break;
  }

  return SDValue();
}

SDValue SystemZTargetLowering::unwrapAddress(SDValue N) const {
  if (N->getOpcode() == SystemZISD::PCREL_WRAPPER)
    return N->getOperand(0);
  return N;
}

SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  switch(N->getOpcode()) {
  default: break;
  case ISD::ZERO_EXTEND:        return combineZERO_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND:        return combineSIGN_EXTEND(N, DCI);
  case ISD::SIGN_EXTEND_INREG:  return combineSIGN_EXTEND_INREG(N, DCI);
  case SystemZISD::MERGE_HIGH:
  case SystemZISD::MERGE_LOW:   return combineMERGE(N, DCI);
  case ISD::LOAD:               return combineLOAD(N, DCI);
  case ISD::STORE:              return combineSTORE(N, DCI);
  case ISD::VECTOR_SHUFFLE:     return combineVECTOR_SHUFFLE(N, DCI);
  case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
  case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
  case ISD::STRICT_FP_ROUND:
  case ISD::FP_ROUND:           return combineFP_ROUND(N, DCI);
  case ISD::STRICT_FP_EXTEND:
  case ISD::FP_EXTEND:          return combineFP_EXTEND(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:         return combineINT_TO_FP(N, DCI);
  case ISD::BSWAP:              return combineBSWAP(N, DCI);
  case SystemZISD::BR_CCMASK:   return combineBR_CCMASK(N, DCI);
  case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI);
  case SystemZISD::GET_CCMASK:  return combineGET_CCMASK(N, DCI);
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:               return combineIntDIVREM(N, DCI);
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:     return combineINTRINSIC(N, DCI);
  }

  return SDValue();
}

// Return the demanded elements for the OpNo source operand of Op.
// DemandedElts are for Op.
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
                                    unsigned OpNo) {
  EVT VT = Op.getValueType();
  unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1);
  APInt SrcDemE;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
      // VECTOR PACK truncates the elements of two source vectors into one.
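      // E.g. for (v8i16 vpksf V1:v4i32, V2:v4i32), result elements 0-3
      // come from V1 and elements 4-7 from V2, so the demanded mask is
      // split in half and the matching half returned for each operand.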
      SrcDemE = DemandedElts;
      if (OpNo == 2)
        SrcDemE.lshrInPlace(NumElts / 2);
      SrcDemE = SrcDemE.trunc(NumElts / 2);
      break;
      // VECTOR UNPACK extends half the elements of the source vector.
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, 0);
      break;
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, NumElts);
      break;
    case Intrinsic::s390_vpdi: {
      // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source.
      SrcDemE = APInt(NumElts, 0);
      if (!DemandedElts[OpNo - 1])
        break;
      unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
      // Demand input element 0 or 1, given by the mask bit value.
      SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
      break;
    }
    case Intrinsic::s390_vsldb: {
      // VECTOR SHIFT LEFT DOUBLE BY BYTE
      assert(VT == MVT::v16i8 && "Unexpected type.");
      unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
      assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
      unsigned NumSrc0Els = 16 - FirstIdx;
      SrcDemE = APInt(NumElts, 0);
      if (OpNo == 1) {
        APInt DemEls = DemandedElts.trunc(NumSrc0Els);
        SrcDemE.insertBits(DemEls, FirstIdx);
      } else {
        APInt DemEls = DemandedElts.lshr(NumSrc0Els);
        SrcDemE.insertBits(DemEls, 0);
      }
      break;
    }
    case Intrinsic::s390_vperm:
      SrcDemE = APInt(NumElts, 1);
      break;
    default:
      llvm_unreachable("Unhandled intrinsic.");
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
      // Scalar operand.
      SrcDemE = APInt(1, 1);
      break;
    case SystemZISD::SELECT_CCMASK:
      SrcDemE = DemandedElts;
      break;
    default:
      llvm_unreachable("Unhandled opcode.");
      break;
    }
  }
  return SrcDemE;
}

static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known,
                                  const APInt &DemandedElts,
                                  const SelectionDAG &DAG, unsigned Depth,
                                  unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  KnownBits LHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  KnownBits RHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
  Known = KnownBits::commonBits(LHSKnown, RHSKnown);
}

void
SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  Known.resetAll();

  // Intrinsic CC result is returned in the two low bits.
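  // (A condition code is always in the range 0..3, so every bit above the
  // low two is known to be zero.)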
  unsigned tmp0, tmp1; // not used
  if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) {
    Known.Zero.setBitsFrom(2);
    return;
  }
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
    return;
  assert(Known.getBitWidth() == VT.getScalarSizeInBits() &&
         "KnownBits does not match VT in bitwidth");
  assert((!VT.isVector() ||
          (DemandedElts.getBitWidth() == VT.getVectorNumElements())) &&
         "DemandedElts does not match VT number of elements");
  unsigned BitWidth = Known.getBitWidth();
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    bool IsLogical = false;
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1);
      break;
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      IsLogical = true;
      LLVM_FALLTHROUGH;
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue SrcOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0);
      Known = DAG.computeKnownBits(SrcOp, SrcDemE, Depth + 1);
      if (IsLogical) {
        Known = Known.zext(BitWidth);
      } else
        Known = Known.sext(BitWidth);
      break;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
    case SystemZISD::SELECT_CCMASK:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0);
      break;
    case SystemZISD::REPLICATE: {
      SDValue SrcOp = Op.getOperand(0);
      Known = DAG.computeKnownBits(SrcOp, Depth + 1);
      if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
        Known = Known.sext(BitWidth); // VREPI sign extends the immediate.
      break;
    }
    default:
      break;
    }
  }

  // Known has the width of the source operand(s).  Adjust if needed to match
  // the passed bitwidth.
  if (Known.getBitWidth() != BitWidth)
    Known = Known.anyextOrTrunc(BitWidth);
}

static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
                                        const SelectionDAG &DAG, unsigned Depth,
                                        unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  if (LHS == 1) return 1; // Early out.
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
  if (RHS == 1) return 1; // Early out.
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getScalarSizeInBits();
  if (SrcBitWidth > VTBits) { // PACK
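    // E.g. when packing i32 elements down to i16, a source with 20 known
    // sign bits per element still guarantees 20 - 16 = 4 sign bits in the
    // narrower result.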
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
    return 1;
  }
  assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
  return Common;
}

unsigned
SystemZTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  if (Op.getResNo() != 0)
    return 1;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1);
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue PackedOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1);
      unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1);
      EVT VT = Op.getValueType();
      unsigned VTBits = VT.getScalarSizeInBits();
      Tmp += VTBits - PackedOp.getScalarValueSizeInBits();
      return Tmp;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::SELECT_CCMASK:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0);
    default:
      break;
    }
  }

  return 1;
}

unsigned
SystemZTargetLowering::getStackProbeSize(MachineFunction &MF) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  unsigned StackAlign = TFI->getStackAlignment();
  assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
         "Unexpected stack alignment");
  // The default stack probe size is 4096 if the function has no
  // stack-probe-size attribute.
  unsigned StackProbeSize = 4096;
  const Function &Fn = MF.getFunction();
  if (Fn.hasFnAttribute("stack-probe-size"))
    Fn.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  // Round down to the stack alignment.
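  // E.g. with 8-byte stack alignment, "stack-probe-size"="100" yields 96;
  // a value that rounds down to zero falls back to StackAlign below.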
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
}

//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Force base value Base into a register before MI.  Return the register.
static Register forceReg(MachineInstr &MI, MachineOperand &Base,
                         const SystemZInstrInfo *TII) {
  if (Base.isReg())
    return Base.getReg();

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
      .add(Base)
      .addImm(0)
      .addReg(0);
  return Reg;
}

// The CC operand of MI might be missing a kill marker because there
// were multiple uses of CC, and ISel didn't know which to mark.
// Figure out whether MI should have had a kill marker.
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {
  // Scan forward through BB for a use/def of CC.
  MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI)));
  for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(SystemZ::CC))
      return false;
    if (mi.definesRegister(SystemZ::CC))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CC is live into a
  // successor.
  if (miI == MBB->end()) {
    for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI)
      if ((*SI)->isLiveIn(SystemZ::CC))
        return false;
  }

  return true;
}

// Return true if it is OK for this Select pseudo-opcode to be cascaded
// together with other Select pseudo-opcodes into a single basic-block with
// a conditional jump around it.
static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return true;

  default:
    return false;
  }
}

// Helper function, which inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from Selects.
static void createPHIsForSelects(SmallVector<MachineInstr*, 8> &Selects,
                                 MachineBasicBlock *TrueMBB,
                                 MachineBasicBlock *FalseMBB,
                                 MachineBasicBlock *SinkMBB) {
  MachineFunction *MF = TrueMBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineInstr *FirstMI = Selects.front();
  unsigned CCValid = FirstMI->getOperand(3).getImm();
  unsigned CCMask = FirstMI->getOperand(4).getImm();

  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one.  Later Selects may reference the results of earlier Selects, but
  // later PHIs have to reference the individual true/false inputs from
  // earlier PHIs.  That also means that PHI construction must work forward
  // from earlier to later, and that the code must maintain a mapping from
  // each earlier PHI's destination register to the registers that went into
  // that PHI.
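  // E.g. given
  //   %d1 = Select32 %t1, %f1, ccmask
  //   %d2 = Select32 %d1, %f2, ccmask
  // the PHI for %d2 cannot use %d1 (which is no longer defined in either
  // predecessor); it uses %t1, the value %d1 takes on the TrueMBB edge:
  //   %d1 = phi [ %t1, TrueMBB ], [ %f1, FalseMBB ]
  //   %d2 = phi [ %t1, TrueMBB ], [ %f2, FalseMBB ]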
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;

  for (auto MI : Selects) {
    Register DestReg = MI->getOperand(0).getReg();
    Register TrueReg = MI->getOperand(1).getReg();
    Register FalseReg = MI->getOperand(2).getReg();

    // If this Select we are generating is the opposite condition from
    // the jump we generated, then we have to swap the operands for the
    // PHI that is going to be generated.
    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
      std::swap(TrueReg, FalseReg);

    if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end())
      TrueReg = RegRewriteTable[TrueReg].first;

    if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end())
      FalseReg = RegRewriteTable[FalseReg].second;

    DebugLoc DL = MI->getDebugLoc();
    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
        .addReg(TrueReg).addMBB(TrueMBB)
        .addReg(FalseReg).addMBB(FalseMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
  }

  MF->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
}

// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr &MI,
                                  MachineBasicBlock *MBB) const {
  assert(isSelectPseudo(MI) && "Bad call to emitSelect()");
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();

  // If we have a sequence of Select* pseudo instructions using the
  // same condition code value, we want to expand all of them into
  // a single pair of basic blocks using the same condition.
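  // Selects whose mask is the exact inverse (CCValid ^ CCMask) also join
  // the group, with their PHI operands swapped.  Unrelated instructions may
  // sit in between as long as they neither clobber CC nor read the result
  // of an earlier Select; the scan also gives up after 20 such instructions.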
  SmallVector<MachineInstr*, 8> Selects;
  SmallVector<MachineInstr*, 8> DbgValues;
  Selects.push_back(&MI);
  unsigned Count = 0;
  for (MachineBasicBlock::iterator NextMIIt =
         std::next(MachineBasicBlock::iterator(MI));
       NextMIIt != MBB->end(); ++NextMIIt) {
    if (isSelectPseudo(*NextMIIt)) {
      assert(NextMIIt->getOperand(3).getImm() == CCValid &&
             "Bad CCValid operands since CC was not redefined.");
      if (NextMIIt->getOperand(4).getImm() == CCMask ||
          NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask)) {
        Selects.push_back(&*NextMIIt);
        continue;
      }
      break;
    }
    if (NextMIIt->definesRegister(SystemZ::CC) ||
        NextMIIt->usesCustomInsertionHook())
      break;
    bool User = false;
    for (auto SelMI : Selects)
      if (NextMIIt->readsVirtualRegister(SelMI->getOperand(0).getReg())) {
        User = true;
        break;
      }
    if (NextMIIt->isDebugInstr()) {
      if (User) {
        assert(NextMIIt->isDebugValue() && "Unhandled debug opcode.");
        DbgValues.push_back(&*NextMIIt);
      }
    }
    else if (User || ++Count > 20)
      break;
  }

  MachineInstr *LastMI = Selects.back();
  bool CCKilled =
      (LastMI->killsRegister(SystemZ::CC) || checkCCKill(*LastMI, MBB));
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = SystemZ::splitBlockAfter(LastMI, MBB);
  MachineBasicBlock *FalseMBB = SystemZ::emitBlockAfter(StartMBB);

  // Unless CC was killed in the last Select instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!CCKilled) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::BRC))
      .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  MBB->addSuccessor(JoinMBB);

  //  JoinMBB:
  //   %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
  //  ...
  MBB = JoinMBB;
  createPHIsForSelects(Selects, StartMBB, FalseMBB, MBB);
  for (auto SelMI : Selects)
    SelMI->eraseFromParent();

  MachineBasicBlock::iterator InsertPos = MBB->getFirstNonPHI();
  for (auto DbgMI : DbgValues)
    MBB->splice(InsertPos, StartMBB, DbgMI);

  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true.  If a STORE ON
// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
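// E.g. a CondStore32 becomes a single STOC when that facility is available
// and no index register is needed, and otherwise a plain ST guarded by a
// BRC that branches around it.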
MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
                                                        MachineBasicBlock *MBB,
                                                        unsigned StoreOpcode,
                                                        unsigned STOCOpcode,
                                                        bool Invert) const {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  Register SrcReg = MI.getOperand(0).getReg();
  MachineOperand Base = MI.getOperand(1);
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  DebugLoc DL = MI.getDebugLoc();

  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);

  // ISel pattern matching also adds a load memory operand of the same
  // address, so take special care to find the storing memory operand.
  MachineMemOperand *MMO = nullptr;
  for (auto *I : MI.memoperands())
    if (I->isStore()) {
      MMO = I;
      break;
    }

  // Use STOCOpcode if possible.  We could use different store patterns in
  // order to avoid matching the index register, but the performance trade-offs
  // might be more complicated in that case.
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    if (Invert)
      CCMask ^= CCValid;

    BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
        .addReg(SrcReg)
        .add(Base)
        .addImm(Disp)
        .addImm(CCValid)
        .addImm(CCMask)
        .addMemOperand(MMO);

    MI.eraseFromParent();
    return MBB;
  }

  // Get the condition needed to branch around the store.
  if (!Invert)
    CCMask ^= CCValid;

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = SystemZ::splitBlockBefore(MI, MBB);
  MachineBasicBlock *FalseMBB = SystemZ::emitBlockAfter(StartMBB);

  // Unless CC was killed in the CondStore instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   store %SrcReg, %Disp(%Index,%Base)
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  BuildMI(MBB, DL, TII->get(StoreOpcode))
      .addReg(SrcReg)
      .add(Base)
      .addImm(Disp)
      .addReg(IndexReg)
      .addMemOperand(MMO);
  MBB->addSuccessor(JoinMBB);

  MI.eraseFromParent();
  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI.  BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
// BitSize is the width of the field in bits, or 0 if this is a partword
// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
// is one of the operands.  Invert says whether the field should be
// inverted after performing BinOpcode (e.g. for NAND).
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
    unsigned BitSize, bool Invert) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  // Src2 can be a register or immediate.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
  Register BitShift = IsSubWord ? MI.getOperand(4).getReg() : Register();
  Register NegBitShift = IsSubWord ? MI.getOperand(5).getReg() : Register();
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigVal       = MRI.createVirtualRegister(RC);
  Register OldVal        = MRI.createVirtualRegister(RC);
  Register NewVal        = (BinOpcode || IsSubWord ?
                            MRI.createVirtualRegister(RC) : Src2.getReg());
  Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert a basic block for the main loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB  = SystemZ::splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB  = SystemZ::emitBlockAfter(StartMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
      .addReg(OrigVal).addMBB(StartMBB)
      .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
        .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    Register Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
    if (BitSize <= 32)
      // XILF with the upper BitSize bits set.
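      // (E.g. for BitSize == 8 the immediate is 0xff000000, flipping only
      // the byte that was rotated into the top of the register.)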
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
          .addReg(Tmp).addImm(-1U << (32 - BitSize));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
      Register Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
          .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simple binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
        .addReg(RotatedOldVal)
        .add(Src2);
  else if (IsSubWord)
    // Use RISBG to rotate Src2 into position and use it to replace the
    // field in RotatedOldVal.
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
        .addReg(RotatedOldVal).addReg(Src2.getReg())
        .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
        .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
      .addReg(OldVal)
      .addReg(NewVal)
      .add(Base)
      .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
// for when the current field should be kept.  BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
    unsigned KeepOldMask, unsigned BitSize) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  Register Src2 = MI.getOperand(3).getReg();
  Register BitShift = (IsSubWord ? MI.getOperand(4).getReg() : Register());
  Register NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : Register());
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode  = BitSize <= 32 ? SystemZ::L  : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode  = TII->getOpcodeForOffset(LOpcode,  Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigVal       = MRI.createVirtualRegister(RC);
  Register OldVal        = MRI.createVirtualRegister(RC);
  Register NewVal        = MRI.createVirtualRegister(RC);
  Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  Register RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
  Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert 3 basic blocks for the loop.
  MachineBasicBlock *StartMBB  = MBB;
  MachineBasicBlock *DoneMBB   = SystemZ::splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB   = SystemZ::emitBlockAfter(StartMBB);
  MachineBasicBlock *UseAltMBB = SystemZ::emitBlockAfter(LoopMBB);
  MachineBasicBlock *UpdateMBB = SystemZ::emitBlockAfter(UseAltMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
      .addReg(OrigVal).addMBB(StartMBB)
      .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
        .addReg(OldVal).addReg(BitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CompareOpcode))
      .addReg(RotatedOldVal).addReg(Src2);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  //  UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
        .addReg(RotatedOldVal).addReg(Src2)
        .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  //  UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
      .addReg(RotatedOldVal).addMBB(LoopMBB)
      .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
        .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
      .addReg(OldVal)
      .addReg(NewVal)
      .add(Base)
      .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
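// The partword swap is performed by a CS loop on the containing aligned
// word, using the same rotation scheme as the ATOMIC_LOADW_* expansions
// above.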
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  Register CmpVal = MI.getOperand(3).getReg();
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  DebugLoc DL = MI.getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement and zero-extension.
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigOldVal = MRI.createVirtualRegister(RC);
  Register OldVal = MRI.createVirtualRegister(RC);
  Register SwapVal = MRI.createVirtualRegister(RC);
  Register StoreVal = MRI.createVirtualRegister(RC);
  Register OldValRot = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB = SystemZ::emitBlockAfter(LoopMBB);

  // StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
      .add(Base)
      .addImm(Disp)
      .addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal       = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
  //   %SwapVal      = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
  //   %OldValRot    = RLL %OldVal, BitSize(%BitShift)
  //                   ^^ The low BitSize bits contain the field
  //                      of interest.
  //   %RetrySwapVal = RISBG32 %SwapVal, %OldValRot, 32, 63-BitSize, 0
  //                   ^^ Replace the upper 32-BitSize bits of the
  //                      swap value with those that we loaded and rotated.
  //   %Dest         = LL[CH] %OldValRot
  //   CR %Dest, %CmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), OldValRot)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(OldValRot)
    .addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(ZExtOpcode), Dest)
    .addReg(OldValRot);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(CmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  // SetMBB:
  //   %StoreVal    = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                  ^^ Rotate the new field to its proper position.
  //   %RetryOldVal = CS %OldVal, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
      .addReg(OldVal)
      .addReg(StoreVal)
      .add(Base)
      .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
  // to the block after the loop.  At this point, CC may have been defined
  // either by the CR in LoopMBB or by the CS in SetMBB.
  if (!MI.registerDefIsDead(SystemZ::CC))
    DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}

// Emit a move from two GR64s to a GR128.
MachineBasicBlock *
SystemZTargetLowering::emitPair128(MachineInstr &MI,
                                   MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  Register Lo = MI.getOperand(2).getReg();
  Register Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2)
    .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}

// Emit an extension from a GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared, or false if
// it's "don't care".
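//
// For example, ZEXT128 of %src expands to (a sketch; register names are
// illustrative):
//
//   %zero64 = LLILL 0
//   %tmp    = INSERT_SUBREG (IMPLICIT_DEF), %zero64, subreg_h64
//   %dest   = INSERT_SUBREG %tmp, %src, subreg_l64
//
// whereas AEXT128 leaves the high half as IMPLICIT_DEF.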
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
                                                     MachineBasicBlock *MBB,
                                                     bool ClearEven) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
  uint64_t DestDisp = MI.getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
  uint64_t SrcDisp = MI.getOperand(3).getImm();
  MachineOperand &LengthMO = MI.getOperand(4);
  uint64_t ImmLength = LengthMO.isImm() ? LengthMO.getImm() : 0;
  Register LenMinus1Reg =
      LengthMO.isReg() ? LengthMO.getReg() : SystemZ::NoRegister;

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB = (ImmLength > 256 && Opcode == SystemZ::CLC
                                   ? SystemZ::splitBlockAfter(MI, MBB)
                                   : nullptr);

  // Check for the loop form, in which operand 5 is the trip count.
  if (MI.getNumExplicitOperands() > 5) {
    Register StartCountReg = MI.getOperand(5).getReg();
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    auto loadZeroAddress = [&]() -> MachineOperand {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, DL, TII->get(SystemZ::LGHI), Reg).addImm(0);
      return MachineOperand::CreateReg(Reg, false);
    };
    if (DestBase.isReg() && DestBase.getReg() == SystemZ::NoRegister)
      DestBase = loadZeroAddress();
    if (SrcBase.isReg() && SrcBase.getReg() == SystemZ::NoRegister)
      SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();

    MachineBasicBlock *StartMBB = nullptr;
    MachineBasicBlock *LoopMBB = nullptr;
    MachineBasicBlock *NextMBB = nullptr;
    MachineBasicBlock *DoneMBB = nullptr;
    MachineBasicBlock *AllDoneMBB = nullptr;

    Register StartSrcReg = forceReg(MI, SrcBase, TII);
    Register StartDestReg =
        (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    Register ThisSrcReg = MRI.createVirtualRegister(RC);
    Register ThisDestReg =
        (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC));
    Register NextSrcReg = MRI.createVirtualRegister(RC);
    Register NextDestReg =
        (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC));
    RC = &SystemZ::GR64BitRegClass;
    Register ThisCountReg = MRI.createVirtualRegister(RC);
    Register NextCountReg = MRI.createVirtualRegister(RC);

    if (LengthMO.isReg()) {
      AllDoneMBB = SystemZ::splitBlockBefore(MI, MBB);
      StartMBB = SystemZ::emitBlockAfter(MBB);
      LoopMBB = SystemZ::emitBlockAfter(StartMBB);
      NextMBB = LoopMBB;
      DoneMBB = SystemZ::emitBlockAfter(LoopMBB);

      // MBB:
      //   # Jump to AllDoneMBB if LenMinus1Reg is -1, or fall thru to
      //   # StartMBB.
      BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
        .addReg(LenMinus1Reg).addImm(-1);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ)
        .addMBB(AllDoneMBB);
      MBB->addSuccessor(AllDoneMBB);
      MBB->addSuccessor(StartMBB);

      // StartMBB:
      //   # Jump to DoneMBB if %StartCountReg is zero, or fall through
      //   # to LoopMBB.
      MBB = StartMBB;
      BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
        .addReg(StartCountReg).addImm(0);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ)
        .addMBB(DoneMBB);
      MBB->addSuccessor(DoneMBB);
      MBB->addSuccessor(LoopMBB);
    } else {
      StartMBB = MBB;
      DoneMBB = SystemZ::splitBlockBefore(MI, MBB);
      LoopMBB = SystemZ::emitBlockAfter(StartMBB);
      NextMBB = (EndMBB ? SystemZ::emitBlockAfter(LoopMBB) : LoopMBB);

      // StartMBB:
      //   # fall through to LoopMBB
      MBB->addSuccessor(LoopMBB);

      DestBase = MachineOperand::CreateReg(NextDestReg, false);
      SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
      ImmLength &= 255;
      if (EndMBB && !ImmLength)
        // If the loop handled the whole CLC range, DoneMBB will be empty with
        // CC live-through into EndMBB, so add it as live-in.
        DoneMBB->addLiveIn(SystemZ::CC);
    }

    // LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC.  The JLH is used only for CLC.
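    //
    // As a concrete illustration (assuming the earlier lowering chose a
    // trip count of ImmLength / 256): a 1000-byte MVCLoop would run three
    // 256-byte MVC iterations here, leaving 1000 & 255 = 232 bytes for the
    // straight-line code after the loop.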
    MBB = LoopMBB;
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    // NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
    MBB = NextMBB;
    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    MBB = DoneMBB;
    if (LengthMO.isReg()) {
      // DoneMBB:
      //   # Make PHIs for RemDestReg/RemSrcReg as the loop may or may not run.
      //   # Use EXecute Relative Long for the remainder of the bytes.  The
      //     target instruction of the EXRL will have a length field of 1 since
      //     0 is an illegal value.  The number of bytes processed becomes
      //     (%LenMinus1Reg & 0xff) + 1.
      //   # Fall through to AllDoneMBB.
      Register RemSrcReg =
          MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      Register RemDestReg =
          HaveSingleBase ? RemSrcReg
                         : MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), RemDestReg)
        .addReg(StartDestReg).addMBB(StartMBB)
        .addReg(NextDestReg).addMBB(LoopMBB);
      if (!HaveSingleBase)
        BuildMI(MBB, DL, TII->get(SystemZ::PHI), RemSrcReg)
          .addReg(StartSrcReg).addMBB(StartMBB)
          .addReg(NextSrcReg).addMBB(LoopMBB);
      MRI.constrainRegClass(LenMinus1Reg, &SystemZ::ADDR64BitRegClass);
      BuildMI(MBB, DL, TII->get(SystemZ::EXRL_Pseudo))
        .addImm(Opcode)
        .addReg(LenMinus1Reg)
        .addReg(RemDestReg).addImm(DestDisp)
        .addReg(RemSrcReg).addImm(SrcDisp);
      MBB->addSuccessor(AllDoneMBB);
      MBB = AllDoneMBB;
    }
  }

  // Handle any remaining bytes with straight-line code.
  while (ImmLength > 0) {
    uint64_t ThisLength = std::min(ImmLength, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
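    // (For example, once DestDisp has advanced to 4224 it no longer fits in
    // the 12-bit unsigned displacement of MVC/CLC, so an LAY rematerializes
    // the address and the displacement restarts at 0; LAY itself accepts a
    // 20-bit signed displacement.)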
    if (!isUInt<12>(DestDisp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
          .add(DestBase)
          .addImm(DestDisp)
          .addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
          .add(SrcBase)
          .addImm(SrcDisp)
          .addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
        .add(DestBase)
        .addImm(DestDisp)
        .addImm(ThisLength)
        .add(SrcBase)
        .addImm(SrcDisp)
        .setMemRefs(MI.memoperands());
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && ImmLength > 0) {
      MachineBasicBlock *NextMBB = SystemZ::splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI.eraseFromParent();
  return MBB;
}

// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register End1Reg = MI.getOperand(0).getReg();
  Register Start1Reg = MI.getOperand(1).getReg();
  Register Start2Reg = MI.getOperand(2).getReg();
  Register CharReg = MI.getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  Register This1Reg = MRI.createVirtualRegister(RC);
  Register This2Reg = MRI.createVirtualRegister(RC);
  Register End2Reg = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB);

  // StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
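  //
  // CC 3 means the string instruction was interrupted after processing a
  // CPU-determined number of bytes and must be resumed with the updated
  // addresses, hence the JO (branch on CC 3) back to LoopMBB.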
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}

// Update TBEGIN instruction with final opcode and register clobbers.
MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
    bool NoFloat) const {
  MachineFunction &MF = *MBB->getParent();
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();

  // Update opcode.
  MI.setDesc(TII->get(Opcode));

  // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
  // Make sure to add the corresponding GRSM bits if they are missing.
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
  Control |= GPRControlBit[15];
  if (TFI->hasFP(MF))
    Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);

  // Add GPR clobbers.
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
      unsigned Reg = SystemZMC::GR64Regs[I];
      MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }

  // Add FPR/VR clobbers.
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
      for (int I = 0; I < 32; I++) {
        unsigned Reg = SystemZMC::VR128Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    } else {
      for (int I = 0; I < 16; I++) {
        unsigned Reg = SystemZMC::FP64Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    }
  }

  return MBB;
}

MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  DebugLoc DL = MI.getDebugLoc();

  Register SrcReg = MI.getOperand(0).getReg();

  // Create new virtual register of the same class as source.
  const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
  Register DstReg = MRI->createVirtualRegister(RC);

  // Replace pseudo with a normal load-and-test that models the def as
  // well.
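  // (For example, LTEBRCompare_VecPseudo of %src becomes %dst = LTEBR %src,
  // which copies the value and sets CC by comparing it with zero.)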
  BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
      .addReg(SrcReg)
      .setMIFlags(MI.getFlags());
  MI.eraseFromParent();

  return MBB;
}

MachineBasicBlock *SystemZTargetLowering::emitProbedAlloca(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  DebugLoc DL = MI.getDebugLoc();
  const unsigned ProbeSize = getStackProbeSize(MF);
  Register DstReg = MI.getOperand(0).getReg();
  Register SizeReg = MI.getOperand(2).getReg();

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = SystemZ::splitBlockAfter(MI, MBB);
  MachineBasicBlock *LoopTestMBB = SystemZ::emitBlockAfter(StartMBB);
  MachineBasicBlock *LoopBodyMBB = SystemZ::emitBlockAfter(LoopTestMBB);
  MachineBasicBlock *TailTestMBB = SystemZ::emitBlockAfter(LoopBodyMBB);
  MachineBasicBlock *TailMBB = SystemZ::emitBlockAfter(TailTestMBB);

  MachineMemOperand *VolLdMMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad, 8, Align(1));

  Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);

  // LoopTestMBB
  //   BRC TailTestMBB
  //   # fallthrough to LoopBodyMBB
  StartMBB->addSuccessor(LoopTestMBB);
  MBB = LoopTestMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), PHIReg)
      .addReg(SizeReg)
      .addMBB(StartMBB)
      .addReg(IncReg)
      .addMBB(LoopBodyMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::CLGFI))
      .addReg(PHIReg)
      .addImm(ProbeSize);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_LT)
      .addMBB(TailTestMBB);
  MBB->addSuccessor(LoopBodyMBB);
  MBB->addSuccessor(TailTestMBB);

  // LoopBodyMBB: Allocate and probe by means of a volatile compare.
  //   J LoopTestMBB
  MBB = LoopBodyMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::SLGFI), IncReg)
      .addReg(PHIReg)
      .addImm(ProbeSize);
  BuildMI(MBB, DL, TII->get(SystemZ::SLGFI), SystemZ::R15D)
      .addReg(SystemZ::R15D)
      .addImm(ProbeSize);
  BuildMI(MBB, DL, TII->get(SystemZ::CG)).addReg(SystemZ::R15D)
      .addReg(SystemZ::R15D).addImm(ProbeSize - 8).addReg(0)
      .setMemRefs(VolLdMMO);
  BuildMI(MBB, DL, TII->get(SystemZ::J)).addMBB(LoopTestMBB);
  MBB->addSuccessor(LoopTestMBB);

  // TailTestMBB
  //   BRC DoneMBB
  //   # fallthrough to TailMBB
  MBB = TailTestMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(PHIReg)
      .addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ)
      .addMBB(DoneMBB);
  MBB->addSuccessor(TailMBB);
  MBB->addSuccessor(DoneMBB);

  // TailMBB
  //   # fallthrough to DoneMBB
  MBB = TailMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::SLGR), SystemZ::R15D)
      .addReg(SystemZ::R15D)
      .addReg(PHIReg);
  BuildMI(MBB, DL, TII->get(SystemZ::CG)).addReg(SystemZ::R15D)
      .addReg(SystemZ::R15D).addImm(-8).addReg(PHIReg)
      .setMemRefs(VolLdMMO);
  MBB->addSuccessor(DoneMBB);

  // DoneMBB
  MBB = DoneMBB;
  BuildMI(*MBB, MBB->begin(), DL, TII->get(TargetOpcode::COPY), DstReg)
      .addReg(SystemZ::R15D);

  MI.eraseFromParent();
  return DoneMBB;
}

SDValue SystemZTargetLowering::
getBackchainAddress(SDValue SP, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  SDLoc DL(SP);
  return DAG.getNode(ISD::ADD, DL, MVT::i64, SP,
                     DAG.getIntPtrConstant(TFL->getBackchainOffset(MF), DL));
}

MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return
        emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
  case SystemZ::XCLoopVarLen:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case SystemZ::PROBED_ALLOCA:
    return emitProbedAlloca(MI, MBB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, MBB);

  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}

// This is only used by the isel schedulers, and is needed only to prevent
// the compiler from crashing when list-ilp is used.
const TargetRegisterClass *
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
  return TargetLowering::getRepRegClassFor(VT);
}