//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionXCOFF.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
"llvm/Target/TargetMachine.h" 93 #include "llvm/Target/TargetOptions.h" 94 #include <algorithm> 95 #include <cassert> 96 #include <cstdint> 97 #include <iterator> 98 #include <list> 99 #include <utility> 100 #include <vector> 101 102 using namespace llvm; 103 104 #define DEBUG_TYPE "ppc-lowering" 105 106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", 107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); 108 109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", 110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); 111 112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", 113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); 114 115 static cl::opt<bool> DisableSCO("disable-ppc-sco", 116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden); 117 118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", 119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden); 120 121 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables", 122 cl::desc("use absolute jump tables on ppc"), cl::Hidden); 123 124 static cl::opt<bool> EnableQuadwordAtomics( 125 "ppc-quadword-atomics", 126 cl::desc("enable quadword lock-free atomic operations"), cl::init(false), 127 cl::Hidden); 128 129 static cl::opt<bool> 130 DisablePerfectShuffle("ppc-disable-perfect-shuffle", 131 cl::desc("disable vector permute decomposition"), 132 cl::init(true), cl::Hidden); 133 134 cl::opt<bool> DisableAutoPairedVecSt( 135 "disable-auto-paired-vec-st", 136 cl::desc("disable automatically generated 32byte paired vector stores"), 137 cl::init(true), cl::Hidden); 138 139 STATISTIC(NumTailCalls, "Number of tail calls"); 140 STATISTIC(NumSiblingCalls, "Number of sibling calls"); 141 STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM"); 142 STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed"); 143 144 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int); 145 146 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl); 147 148 static const char AIXSSPCanaryWordName[] = "__ssp_canary_word"; 149 150 // FIXME: Remove this once the bug has been fixed! 151 extern cl::opt<bool> ANDIGlueBug; 152 153 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, 154 const PPCSubtarget &STI) 155 : TargetLowering(TM), Subtarget(STI) { 156 // Initialize map that relates the PPC addressing modes to the computed flags 157 // of a load/store instruction. The map is used to determine the optimal 158 // addressing mode when selecting load and stores. 159 initializeAddrModeMap(); 160 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all 161 // arguments are at least 4/8 bytes aligned. 162 bool isPPC64 = Subtarget.isPPC64(); 163 setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4)); 164 165 // Set up the register classes. 166 addRegisterClass(MVT::i32, &PPC::GPRCRegClass); 167 if (!useSoftFloat()) { 168 if (hasSPE()) { 169 addRegisterClass(MVT::f32, &PPC::GPRCRegClass); 170 // EFPU2 APU only supports f32 171 if (!Subtarget.hasEFPU2()) 172 addRegisterClass(MVT::f64, &PPC::SPERCRegClass); 173 } else { 174 addRegisterClass(MVT::f32, &PPC::F4RCRegClass); 175 addRegisterClass(MVT::f64, &PPC::F8RCRegClass); 176 } 177 } 178 179 // Match BITREVERSE to customized fast code sequence in the td file. 
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // Custom lower inline assembly to check for special registers.
  setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
  setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 and no HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = {MVT::i32, MVT::i64};
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
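
      // The FP-to-int direction is promoted the same way below: i1 has no
      // native FP conversions, so widen to the GPR width (i64 on PPC64,
      // i32 otherwise) and let the ordinary int<->fp lowering take over.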
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9, where a
  // hardware instruction can compute the remainder. When both the remainder
  // and the quotient are needed, it is more efficient to compute the
  // remainder from the result of the division rather than to use the
  // remainder instruction. The instructions are legalized directly because
  // the DivRemPairsPass performs the transformation at the IR level.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
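  // (Both node families are marked Expand just below, so using them as
  // building blocks here would only produce nodes that themselves need
  // further expansion.)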
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // Handle constrained floating-point operations of scalar.
  // TODO: Handle SPE specific operation.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);

  if (!Subtarget.hasSPE()) {
    setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
  }

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  // MASS transformation for LLVM intrinsics with replicating fast-math flag,
  // kept consistent with the PPCGenScalarMASSEntries pass.
  if (TM.getOptLevel() == CodeGenOpt::Aggressive &&
      TM.Options.PPCGenScalarMASSEntries) {
    setOperationAction(ISD::FSIN, MVT::f64, Custom);
    setOperationAction(ISD::FCOS, MVT::f64, Custom);
    setOperationAction(ISD::FPOW, MVT::f64, Custom);
    setOperationAction(ISD::FLOG, MVT::f64, Custom);
    setOperationAction(ISD::FLOG10, MVT::f64, Custom);
    setOperationAction(ISD::FEXP, MVT::f64, Custom);
    setOperationAction(ISD::FSIN, MVT::f32, Custom);
    setOperationAction(ISD::FCOS, MVT::f32, Custom);
    setOperationAction(ISD::FPOW, MVT::f32, Custom);
    setOperationAction(ISD::FLOG, MVT::f32, Custom);
    setOperationAction(ISD::FLOG10, MVT::f32, Custom);
    setOperationAction(ISD::FEXP, MVT::f32, Custom);
  }

  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  if (Subtarget.hasSPE())
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have a scalar BSWAP instruction, but on P9 we can use
  // the vector byte-reverse instruction xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector() && Subtarget.isPPC64())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
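  // (Illustrative: fsel FRT, FRA, FRC, FRB yields FRC when FRA >= 0.0 and
  // FRB otherwise, so an ordered compare against zero can fold an FP select
  // into a single instruction; the custom lowering emits PPCISD::FSEL when
  // the operands permit it.)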
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  if (Subtarget.hasFPU()) {
    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);

    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
  }

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);

    // SPE supports signaling compare of f32/f64.
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented; please don't build your
  // own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f64, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::ppcf128, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f64, Custom);

  // To handle counter-based loop conditions.
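  // (e.g. the llvm.loop.decrement intrinsic used for CTR hardware loops,
  // whose i1 result feeds the loop back-branch.)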
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    } else {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    }
  }

  // With the instructions enabled under FPCVT, we can do everything.
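  // (FPCVT covers conversions such as fctiwuz/fctiduz and fcfids/fcfidus,
  // filling in the signedness/width combinations the base ISA lacks.)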
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  // PowerPC has better expansions for funnel shifts than the generic
  // TargetLowering::expandFunnelShift.
  if (Subtarget.has64BitSupport()) {
    setOperationAction(ISD::FSHL, MVT::i64, Custom);
    setOperationAction(ISD::FSHR, MVT::i64, Custom);
  }
  setOperationAction(ISD::FSHL, MVT::i32, Custom);
  setOperationAction(ISD::FSHR, MVT::i32, Custom);

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }

    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VTs.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected
      // after the loop.
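      // (Without P8Vector, the SMAX/SMIN/UMAX/UMIN actions for v2i64 are
      // reset to Expand immediately after the loop.)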
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Custom lower ROTL v1i128 to VECTOR_SHUFFLE v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    if (Subtarget.isISA3_1()) {
      setOperationAction(ISD::MUL, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
      setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
      setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v2i64, Legal);
      setOperationAction(ISD::SREM, MVT::v2i64, Legal);
      setOperationAction(ISD::UREM, MVT::v4i32, Legal);
      setOperationAction(ISD::SREM, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v1i128, Legal);
      setOperationAction(ISD::SREM, MVT::v1i128, Legal);
      setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
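    // (These condition codes are therefore expanded into combinations of
    // the available ordered compares and vector logical operations.)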
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact
      // exception, so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      if (Subtarget.isISA3_1())
        setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
      else
        setOperationAction(ISD::SETCC, MVT::v1i128, Expand);

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);

      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      // Handle constrained floating-point operations of vector types.
      // The predicate is `hasVSX` because Altivec instructions do not raise
      // FP exceptions, while VSX vector instructions do.
      setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);

      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);

      // Expand the SELECT to SELECT_CC.
      setOperationAction(ISD::SELECT, MVT::f128, Expand);

      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);

      // No implementation for these ops on PowerPC.
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);

      setOperationAction(ISD::FMA, MVT::f128, Legal);
      setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::BITCAST, MVT::i128, Custom);

      // Handle constrained floating-point operations of fp128.
      setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    } else if (Subtarget.hasVSX()) {
      setOperationAction(ISD::LOAD, MVT::f128, Promote);
      setOperationAction(ISD::STORE, MVT::f128, Promote);

      AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
      AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);

      // Set FADD/FSUB as libcall to avoid having the legalizer expand the
      // fp_to_uint and int_to_fp.
      setOperationAction(ISD::FADD, MVT::f128, LibCall);
      setOperationAction(ISD::FSUB, MVT::f128, LibCall);

      setOperationAction(ISD::FMUL, MVT::f128, Expand);
      setOperationAction(ISD::FDIV, MVT::f128, Expand);
      setOperationAction(ISD::FNEG, MVT::f128, Expand);
      setOperationAction(ISD::FABS, MVT::f128, Expand);
      setOperationAction(ISD::FSQRT, MVT::f128, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Expand);
      setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

      // Expand the fp_extend if the target type is fp128.
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);

      // Expand the fp_round if the source type is fp128.
      for (MVT VT : {MVT::f32, MVT::f64}) {
        setOperationAction(ISD::FP_ROUND, VT, Custom);
        setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
      }

      setOperationAction(ISD::SETCC, MVT::f128, Custom);
      setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
      setOperationAction(ISD::BR_CC, MVT::f128, Expand);

      // Lower the following f128 select_cc pattern:
      //   select_cc x, y, tv, fv, cc -> select_cc (setcc x, y, cc), 0, tv, fv, NE
      setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);

      // We need to handle f128 SELECT_CC with integer result type.
      setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
      setOperationAction(ISD::SELECT_CC, MVT::i64, isPPC64 ? Custom : Expand);
    }

    if (Subtarget.hasP9Altivec()) {
      if (Subtarget.isISA3_1()) {
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
      } else {
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
      }
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }

    if (Subtarget.hasP10Vector()) {
      setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
    }
  }

  if (Subtarget.pairedVectorMemops()) {
    addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
    setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
    setOperationAction(ISD::STORE, MVT::v256i1, Custom);
  }
  if (Subtarget.hasMMA()) {
    addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
    setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
    setOperationAction(ISD::STORE, MVT::v512i1, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  if (Subtarget.isISA3_1())
    setOperationAction(ISD::SRA, MVT::v1i128, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  if (shouldInlineQuadwordAtomics()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
    setOperationAction(ISD::INTRINSIC_VOID, MVT::i128, Custom);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  setLibcallName(RTLIB::MULO_I128, nullptr);
  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
    setLibcallName(RTLIB::MUL_I128, nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
  }

  if (!isPPC64)
    setMaxAtomicSizeInBitsSupported(32);
  else if (shouldInlineQuadwordAtomics())
    setMaxAtomicSizeInBitsSupported(128);
  else
    setMaxAtomicSizeInBitsSupported(64);
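  // For illustration: given the limits above, on a 64-bit subtarget where
  // shouldInlineQuadwordAtomics() holds, an operation such as
  //   %old = atomicrmw add i128* %p, i128 %v seq_cst
  // can stay a native atomic node and be expanded inline (the quadword
  // lqarx/stqcx. sequence), while anything wider than the supported size is
  // rewritten by AtomicExpand into __atomic_* runtime calls.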
  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine({ISD::ADD, ISD::SHL, ISD::SRA, ISD::SRL, ISD::MUL,
                       ISD::FMA, ISD::SINT_TO_FP, ISD::BUILD_VECTOR});
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine({ISD::LOAD, ISD::STORE, ISD::BR_CC});
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine({ISD::BSWAP, ISD::INTRINSIC_WO_CHAIN,
                       ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID});

  setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND});

  setTargetDAGCombine({ISD::TRUNCATE, ISD::VECTOR_SHUFFLE});

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine({ISD::TRUNCATE, ISD::SETCC, ISD::SELECT_CC});
  }

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine({ISD::ABS, ISD::VSELECT});
  }

  setLibcallName(RTLIB::LOG_F128, "logf128");
  setLibcallName(RTLIB::LOG2_F128, "log2f128");
  setLibcallName(RTLIB::LOG10_F128, "log10f128");
  setLibcallName(RTLIB::EXP_F128, "expf128");
  setLibcallName(RTLIB::EXP2_F128, "exp2f128");
  setLibcallName(RTLIB::SIN_F128, "sinf128");
  setLibcallName(RTLIB::COS_F128, "cosf128");
  setLibcallName(RTLIB::POW_F128, "powf128");
  setLibcallName(RTLIB::FMIN_F128, "fminf128");
  setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
  setLibcallName(RTLIB::REM_F128, "fmodf128");
  setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
  setLibcallName(RTLIB::CEIL_F128, "ceilf128");
  setLibcallName(RTLIB::FLOOR_F128, "floorf128");
  setLibcallName(RTLIB::TRUNC_F128, "truncf128");
  setLibcallName(RTLIB::ROUND_F128, "roundf128");
  setLibcallName(RTLIB::LROUND_F128, "lroundf128");
  setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
  setLibcallName(RTLIB::RINT_F128, "rintf128");
  setLibcallName(RTLIB::LRINT_F128, "lrintf128");
  setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
  setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
  setLibcallName(RTLIB::FMA_F128, "fmaf128");

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(Align(4));

  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR10:
  case PPC::DIR_PWR_FUTURE:
    setPrefLoopAlignment(Align(16));
    setPrefFunctionAlignment(Align(16));
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
      Subtarget.getCPUDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of a function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }

  IsStrictFPEnabled = true;

  // Let the subtarget (CPU) decide if a predictable select is more expensive
  // than the corresponding branch. This information is used in CGP to decide
  // when to convert selects into branches.
  PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
}

// *********************************** NOTE ************************************
// For selecting load and store instructions, the addressing modes are defined
// as ComplexPatterns in PPCInstrInfo.td, which are then utilized in the TD
// patterns to match the load and store instructions.
//
// The TD definitions for the addressing modes correspond to their respective
// Select<AddrMode>Form() function in PPCISelDAGToDAG.cpp. These functions rely
// on SelectOptimalAddrMode(), which calls computeMOFlags() to compute the
// address mode flags of a particular node. Afterwards, the computed address
// flags are passed into getAddrModeForFlags() in order to retrieve the optimal
// addressing mode. SelectOptimalAddrMode() then sets the Base and Displacement
// accordingly, based on the preferred addressing mode.
//
// Within PPCISelLowering.h, there are two enums: MemOpFlags and AddrMode.
// MemOpFlags contains all the possible flags that can be used to compute the
// optimal addressing mode for load and store instructions.
// AddrMode contains all the possible load and store addressing modes available
// on Power (such as DForm, DSForm, DQForm, XForm, etc.)
//
// When adding new load and store instructions, it is possible that new address
// flags may need to be added into MemOpFlags, and a new addressing mode will
// need to be added to AddrMode. An entry of the new addressing mode (consisting
// of the minimal and main distinguishing address flags for the new load/store
// instructions) will need to be added into initializeAddrModeMap() below.
// Finally, when adding new addressing modes, getAddrModeForFlags() will need
// to be updated to account for selecting the optimal addressing mode.
// *****************************************************************************
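// For illustration: for a zero-extending word load such as `lwz r3, 16(r4)`,
// computeMOFlags() should produce a flag set along the lines of
//   PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_WordInt
// which matches the first AM_DForm entry registered below;
// getAddrModeForFlags() then returns PPC::AM_DForm, and the corresponding
// Select<AddrMode>Form() routine sets the Base and Displacement for the
// D-Form match.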
/// Initialize the map that relates the different addressing modes of the load
/// and store instructions to a set of flags. This ensures the load/store
/// instruction is correctly matched during instruction selection.
void PPCTargetLowering::initializeAddrModeMap() {
  AddrModesMap[PPC::AM_DForm] = {
      // LWZ, STW
      PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_WordInt,
      PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_WordInt,
      PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt,
      PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt,
      // LBZ, LHZ, STB, STH
      PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt,
      PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt,
      PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt,
      PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt,
      // LHA
      PPC::MOF_SExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt,
      PPC::MOF_SExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt,
      PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt,
      PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt,
      // LFS, LFD, STFS, STFD
      PPC::MOF_RPlusSImm16 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
      PPC::MOF_RPlusLo | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
      PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
      PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
  };
  AddrModesMap[PPC::AM_DSForm] = {
      // LWA
      PPC::MOF_SExt | PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_WordInt,
      PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt,
      PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt,
      // LD, STD
      PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_DoubleWordInt,
      PPC::MOF_NotAddNorCst | PPC::MOF_DoubleWordInt,
      PPC::MOF_AddrIsSImm32 | PPC::MOF_DoubleWordInt,
      // DFLOADf32, DFLOADf64, DSTOREf32, DSTOREf64
      PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
      PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
      PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
  };
  AddrModesMap[PPC::AM_DQForm] = {
      // LXV, STXV
      PPC::MOF_RPlusSImm16Mult16 | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
      PPC::MOF_NotAddNorCst | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
      PPC::MOF_AddrIsSImm32 | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
  };
  AddrModesMap[PPC::AM_PrefixDForm] = {PPC::MOF_RPlusSImm34 |
                                       PPC::MOF_SubtargetP10};
  // TODO: Add mapping for quadword load/store.
}
/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 &&
        VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
      MaxAlign = Align(32);
    else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
             MaxAlign < 16)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Align EltAlign;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      Align EltAlign;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
uint64_t PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
  if (Subtarget.hasAltivec())
    getMaxByValAlign(Ty, Alignment, Align(16));
  return Alignment.value();
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  return VT.isScalarInteger();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::XSMAXC: return "PPCISD::XSMAXC";
  case PPCISD::XSMINC: return "PPCISD::XSMINC";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
    return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
    return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::FTSQRT: return "PPCISD::FTSQRT";
  case PPCISD::FSQRT: return "PPCISD::FSQRT";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::XXSPLTI_SP_TO_DP: return "PPCISD::XXSPLTI_SP_TO_DP";
  case PPCISD::XXSPLTI32DX: return "PPCISD::XXSPLTI32DX";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
return "PPCISD::ATOMIC_CMP_SWAP_8"; 1647 case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16"; 1648 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; 1649 case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET"; 1650 case PPCISD::PROBED_ALLOCA: return "PPCISD::PROBED_ALLOCA"; 1651 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 1652 case PPCISD::SRL: return "PPCISD::SRL"; 1653 case PPCISD::SRA: return "PPCISD::SRA"; 1654 case PPCISD::SHL: return "PPCISD::SHL"; 1655 case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE"; 1656 case PPCISD::CALL: return "PPCISD::CALL"; 1657 case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP"; 1658 case PPCISD::CALL_NOTOC: return "PPCISD::CALL_NOTOC"; 1659 case PPCISD::CALL_RM: 1660 return "PPCISD::CALL_RM"; 1661 case PPCISD::CALL_NOP_RM: 1662 return "PPCISD::CALL_NOP_RM"; 1663 case PPCISD::CALL_NOTOC_RM: 1664 return "PPCISD::CALL_NOTOC_RM"; 1665 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 1666 case PPCISD::BCTRL: return "PPCISD::BCTRL"; 1667 case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC"; 1668 case PPCISD::BCTRL_RM: 1669 return "PPCISD::BCTRL_RM"; 1670 case PPCISD::BCTRL_LOAD_TOC_RM: 1671 return "PPCISD::BCTRL_LOAD_TOC_RM"; 1672 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 1673 case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE"; 1674 case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; 1675 case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; 1676 case PPCISD::MFOCRF: return "PPCISD::MFOCRF"; 1677 case PPCISD::MFVSR: return "PPCISD::MFVSR"; 1678 case PPCISD::MTVSRA: return "PPCISD::MTVSRA"; 1679 case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ"; 1680 case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP"; 1681 case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP"; 1682 case PPCISD::SCALAR_TO_VECTOR_PERMUTED: 1683 return "PPCISD::SCALAR_TO_VECTOR_PERMUTED"; 1684 case PPCISD::ANDI_rec_1_EQ_BIT: 1685 return "PPCISD::ANDI_rec_1_EQ_BIT"; 1686 case PPCISD::ANDI_rec_1_GT_BIT: 1687 return "PPCISD::ANDI_rec_1_GT_BIT"; 1688 case PPCISD::VCMP: return "PPCISD::VCMP"; 1689 case PPCISD::VCMP_rec: return "PPCISD::VCMP_rec"; 1690 case PPCISD::LBRX: return "PPCISD::LBRX"; 1691 case PPCISD::STBRX: return "PPCISD::STBRX"; 1692 case PPCISD::LFIWAX: return "PPCISD::LFIWAX"; 1693 case PPCISD::LFIWZX: return "PPCISD::LFIWZX"; 1694 case PPCISD::LXSIZX: return "PPCISD::LXSIZX"; 1695 case PPCISD::STXSIX: return "PPCISD::STXSIX"; 1696 case PPCISD::VEXTS: return "PPCISD::VEXTS"; 1697 case PPCISD::LXVD2X: return "PPCISD::LXVD2X"; 1698 case PPCISD::STXVD2X: return "PPCISD::STXVD2X"; 1699 case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE"; 1700 case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE"; 1701 case PPCISD::ST_VSR_SCAL_INT: 1702 return "PPCISD::ST_VSR_SCAL_INT"; 1703 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 1704 case PPCISD::BDNZ: return "PPCISD::BDNZ"; 1705 case PPCISD::BDZ: return "PPCISD::BDZ"; 1706 case PPCISD::MFFS: return "PPCISD::MFFS"; 1707 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 1708 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 1709 case PPCISD::CR6SET: return "PPCISD::CR6SET"; 1710 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; 1711 case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT"; 1712 case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT"; 1713 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; 1714 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; 1715 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; 1716 case 
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::TLSGD_AIX: return "PPCISD::TLSGD_AIX";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::PADDI_DTPREL: return "PPCISD::PADDI_DTPREL";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::VABSD: return "PPCISD::VABSD";
  case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
  case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64";
  case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE";
  case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
  case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH";
  case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF";
  case PPCISD::MAT_PCREL_ADDR: return "PPCISD::MAT_PCREL_ADDR";
  case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
    return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
  case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
    return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
  case PPCISD::ACC_BUILD: return "PPCISD::ACC_BUILD";
  case PPCISD::PAIR_BUILD: return "PPCISD::PAIR_BUILD";
  case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG";
  case PPCISD::XXMFACC: return "PPCISD::XXMFACC";
  case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT";
  case PPCISD::ZEXT_LD_SPLAT: return "PPCISD::ZEXT_LD_SPLAT";
  case PPCISD::SEXT_LD_SPLAT: return "PPCISD::SEXT_LD_SPLAT";
  case PPCISD::FNMSUB: return "PPCISD::FNMSUB";
  case PPCISD::STRICT_FADDRTZ: return "PPCISD::STRICT_FADDRTZ";
  case PPCISD::STRICT_FCTIDZ: return "PPCISD::STRICT_FCTIDZ";
  case PPCISD::STRICT_FCTIWZ: return "PPCISD::STRICT_FCTIWZ";
  case PPCISD::STRICT_FCTIDUZ: return "PPCISD::STRICT_FCTIDUZ";
  case PPCISD::STRICT_FCTIWUZ: return "PPCISD::STRICT_FCTIWUZ";
  case PPCISD::STRICT_FCFID: return "PPCISD::STRICT_FCFID";
  case PPCISD::STRICT_FCFIDU: return "PPCISD::STRICT_FCFIDU";
  case PPCISD::STRICT_FCFIDS: return "PPCISD::STRICT_FCFIDS";
  case PPCISD::STRICT_FCFIDUS: return "PPCISD::STRICT_FCFIDUS";
  case PPCISD::LXVRZX: return "PPCISD::LXVRZX";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}
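// For illustration: in the big-endian two-input case (ShuffleKind 0), the
// check above accepts exactly the mask selecting the odd bytes of both
// inputs, e.g.
//   <1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31>
// which is the byte pattern a vpkuhum of two vectors produces.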
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}
/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}
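// For illustration: with UnitSize = 4, LHSStart = 8 and RHSStart = 24 (the
// big-endian "normal" vmrglw case above), isVMerge accepts the mask
//   <8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31>
// i.e. the low-order words of the two inputs, interleaved one word at a time.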
/**
 * Common function used to match vmrgew and vmrgow shuffles.
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * ("Targeting your applications - what little endian and big endian IBM XL
 * C/C++ compiler differences mean to you").
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16 elements
 * of size 8 bits. More info on the shuffle vector can be found in the
 * shufflevector section of the Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices
 *     16 to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 *            input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}
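// For illustration: for a big-endian even-word merge of two different inputs
// (IndexOffset = 0, RHSStartValue = 16), the helper above accepts the mask
//   <0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27>
// which matches what vmrgew produces: words 0 and 2 of each input,
// interleaved.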
/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask represents an even or odd word merge
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
  return false;
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}
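// For illustration: for ShuffleKind 0 on a big-endian target, the mask
//   <3, 4, 5, ..., 18>
// is consecutive starting at 3, so isVSLDOIShuffleMask above returns a shift
// amount of 3 (the vsldoi byte count); for the little-endian swapped form
// (ShuffleKind 2) the same run would be reported as 16 - 3 = 13.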
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  EVT VT = N->getValueType(0);
  if (VT == MVT::v2i64 || VT == MVT::v2f64)
    return EltSize == 8 && N->getMaskElt(0) == N->getMaskElt(1);

  assert(VT == MVT::v16i8 && isPowerOf2_32(EltSize) &&
         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  // The consecutive indices need to specify an element, not part of two
  // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
/// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the delta indices number among the N byte element, if
/// the mask is in increasing/decreasing order then it is 1/-1.
/// \return true iff the mask is shuffling N byte elements.
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; // Width is never greater than 16
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}

bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12, which are the beginning of words.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

  // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }

    return true;
  } else { // BE
    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }

    return true;
  }
}

bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}

bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}

/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second input
/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
/// mask.
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}
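// For illustration: on a big-endian target, a mask whose two doubleword
// indices are M0 = 0 and M1 = 3 (doubleword 0 of the first input, doubleword
// 1 of the second) satisfies M0 < 2 && M1 > 1 above, so the routine produces
// Swap = false and DM = (0 << 1) + (3 & 1) = 1.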
/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big endian bias - namely
/// elements are counted from the left of the vector register).
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  EVT VT = SVOp->getValueType(0);

  if (VT == MVT::v2i64 || VT == MVT::v2f64)
    return DAG.getDataLayout().isLittleEndian() ? 1 - SVOp->getMaskElt(0)
                                                : SVOp->getMaskElt(0);

  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal;

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue(); // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue; // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16) // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}

bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}
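// For illustration: given a ConstantSDNode holding 0x7FFF, isIntS16Immediate
// yields Imm = 32767 and returns true; for 0x8000 the truncation gives
// Imm = -32768, the round-trip comparison against the original value fails,
// and it returns false.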
/// Used when computing address flags for selecting loads and stores.
/// If we have an OR, check if the LHS and RHS are provably disjoint.
/// An OR of two provably disjoint values is equivalent to an ADD.
/// Most PPC load/store instructions compute the effective address as a sum,
/// so doing this conversion is useful.
static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N) {
  if (N.getOpcode() != ISD::OR)
    return false;
  KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
  if (!LHSKnown.Zero.getBoolValue())
    return false;
  KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
  return (~(LHSKnown.Zero | RHSKnown.Zero) == 0);
}

/// SelectAddressEVXRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
                                               SDValue &Index,
                                               SelectionDAG &DAG) const {
  for (SDNode *U : N->uses()) {
    if (MemSDNode *Memop = dyn_cast<MemSDNode>(U)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }
  return false;
}

/// isIntS34Immediate - This method tests if the value of the given node can be
/// accurately represented as a sign extension from a 34-bit value. If so,
/// this returns true and the immediate.
bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
  return isInt<34>(Imm);
}

bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
  return isIntS34Immediate(Op.getNode(), Imm);
}
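// For illustration: isInt<34>() accepts the range [-2^33, 2^33 - 1], so a
// constant of 0x1FFFFFFFF (2^33 - 1) is reported as a valid 34-bit signed
// immediate for the prefixed (paddi-style) D-Form encodings, while 2^33 is
// not.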
2671 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2672
2673 if (LHSKnown.Zero.getBoolValue()) {
2674 KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2675 // If all of the bits are known zero on the LHS or RHS, the add won't
2676 // carry.
2677 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2678 Base = N.getOperand(0);
2679 Index = N.getOperand(1);
2680 return true;
2681 }
2682 }
2683 }
2684
2685 return false;
2686 }
2687
2688 // If we happen to be doing an i64 load or store into a stack slot that has
2689 // less than a 4-byte alignment, then the frame-index elimination may need to
2690 // use an indexed load or store instruction (because the offset may not be a
2691 // multiple of 4). The extra register needed to hold the offset comes from the
2692 // register scavenger, and it is possible that the scavenger will need to use
2693 // an emergency spill slot. As a result, we need to make sure that a spill slot
2694 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2695 // stack slot.
2696 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2697 // FIXME: This does not handle the LWA case.
2698 if (VT != MVT::i64)
2699 return;
2700
2701 // NOTE: We'll exclude negative FIs here, which come from argument
2702 // lowering, because there are no known test cases triggering this problem
2703 // using packed structures (or similar). We can remove this exclusion if
2704 // we find such a test case. The reason why this is so test-case driven is
2705 // because this entire 'fixup' is only to prevent crashes (from the
2706 // register scavenger) on not-really-valid inputs. For example, if we have:
2707 // %a = alloca i1
2708 // %b = bitcast i1* %a to i64*
2709 // store i64 0, i64* %b
2710 // then the store should really be marked as 'align 1', but is not. If it
2711 // were marked as 'align 1' then the indexed form would have been
2712 // instruction-selected initially, and the problem this 'fixup' is preventing
2713 // won't happen regardless.
2714 if (FrameIdx < 0)
2715 return;
2716
2717 MachineFunction &MF = DAG.getMachineFunction();
2718 MachineFrameInfo &MFI = MF.getFrameInfo();
2719
2720 if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2721 return;
2722
2723 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2724 FuncInfo->setHasNonRISpills();
2725 }
2726
2727 /// Returns true if the address N can be represented by a base register plus
2728 /// a signed 16-bit displacement [r+imm], and if it is not better
2729 /// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
2730 /// displacements that are multiples of that value.
2731 bool PPCTargetLowering::SelectAddressRegImm(
2732 SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2733 MaybeAlign EncodingAlignment) const {
2734 // FIXME: dl should come from the parent load or store, not from the address.
2735 SDLoc dl(N);
2736
2737 // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2738 // a [pc+imm].
2739 if (SelectAddressPCRel(N, Base))
2740 return false;
2741
2742 // If this can be more profitably realized as r+r, fail.
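// For example, a plain sum of two registers has no immediate to fold, so
// the indexed [r+r] X-form is the only good choice, while (add %reg, 8)
// is better served by the [r+imm] D-form matched below, since 8 fits the
// signed 16-bit displacement field. (Illustrative operands only.)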
2743 if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment)) 2744 return false; 2745 2746 if (N.getOpcode() == ISD::ADD) { 2747 int16_t imm = 0; 2748 if (isIntS16Immediate(N.getOperand(1), imm) && 2749 (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) { 2750 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2751 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2752 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2753 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2754 } else { 2755 Base = N.getOperand(0); 2756 } 2757 return true; // [r+i] 2758 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 2759 // Match LOAD (ADD (X, Lo(G))). 2760 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 2761 && "Cannot handle constant offsets yet!"); 2762 Disp = N.getOperand(1).getOperand(0); // The global address. 2763 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 2764 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 2765 Disp.getOpcode() == ISD::TargetConstantPool || 2766 Disp.getOpcode() == ISD::TargetJumpTable); 2767 Base = N.getOperand(0); 2768 return true; // [&g+r] 2769 } 2770 } else if (N.getOpcode() == ISD::OR) { 2771 int16_t imm = 0; 2772 if (isIntS16Immediate(N.getOperand(1), imm) && 2773 (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) { 2774 // If this is an or of disjoint bitfields, we can codegen this as an add 2775 // (for better address arithmetic) if the LHS and RHS of the OR are 2776 // provably disjoint. 2777 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0)); 2778 2779 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 2780 // If all of the bits are known zero on the LHS or RHS, the add won't 2781 // carry. 2782 if (FrameIndexSDNode *FI = 2783 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 2784 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 2785 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 2786 } else { 2787 Base = N.getOperand(0); 2788 } 2789 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 2790 return true; 2791 } 2792 } 2793 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 2794 // Loading from a constant address. 2795 2796 // If this address fits entirely in a 16-bit sext immediate field, codegen 2797 // this as "d, 0" 2798 int16_t Imm; 2799 if (isIntS16Immediate(CN, Imm) && 2800 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) { 2801 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 2802 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 2803 CN->getValueType(0)); 2804 return true; 2805 } 2806 2807 // Handle 32-bit sext immediates with LIS + addr mode. 2808 if ((CN->getValueType(0) == MVT::i32 || 2809 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 2810 (!EncodingAlignment || 2811 isAligned(*EncodingAlignment, CN->getZExtValue()))) { 2812 int Addr = (int)CN->getZExtValue(); 2813 2814 // Otherwise, break this down into an LIS + disp. 2815 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 2816 2817 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 2818 MVT::i32); 2819 unsigned Opc = CN->getValueType(0) == MVT::i32 ? 
PPC::LIS : PPC::LIS8;
2820 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2821 return true;
2822 }
2823 }
2824
2825 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2826 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2827 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2828 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2829 } else
2830 Base = N;
2831 return true; // [r+0]
2832 }
2833
2834 /// Similar to the 16-bit case but for instructions that take a 34-bit
2835 /// displacement field (prefixed loads/stores).
2836 bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
2837 SDValue &Base,
2838 SelectionDAG &DAG) const {
2839 // Only on 64-bit targets.
2840 if (N.getValueType() != MVT::i64)
2841 return false;
2842
2843 SDLoc dl(N);
2844 int64_t Imm = 0;
2845
2846 if (N.getOpcode() == ISD::ADD) {
2847 if (!isIntS34Immediate(N.getOperand(1), Imm))
2848 return false;
2849 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2850 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2851 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2852 else
2853 Base = N.getOperand(0);
2854 return true;
2855 }
2856
2857 if (N.getOpcode() == ISD::OR) {
2858 if (!isIntS34Immediate(N.getOperand(1), Imm))
2859 return false;
2860 // If this is an or of disjoint bitfields, we can codegen this as an add
2861 // (for better address arithmetic) if the LHS and RHS of the OR are
2862 // provably disjoint.
2863 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2864 if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
2865 return false;
2866 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2867 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2868 else
2869 Base = N.getOperand(0);
2870 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2871 return true;
2872 }
2873
2874 if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit constant.
2875 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2876 Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
2877 return true;
2878 }
2879
2880 return false;
2881 }
2882
2883 /// SelectAddressRegRegOnly - Given the specified address, force it to be
2884 /// represented as an indexed [r+r] operation.
2885 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2886 SDValue &Index,
2887 SelectionDAG &DAG) const {
2888 // Check to see if we can easily represent this as an [r+r] address. This
2889 // will fail if it thinks that the address is more profitably represented as
2890 // reg+imm, e.g. where imm = 0.
2891 if (SelectAddressRegReg(N, Base, Index, DAG))
2892 return true;
2893
2894 // If the address is the result of an add, we will utilize the fact that the
2895 // address calculation includes an implicit add. However, we can reduce
2896 // register pressure if we do not materialize a constant just for use as the
2897 // index register. We therefore split the add unless it adds a 16-bit
2898 // signed constant to a value and both of its operands have a single use.
2899 int16_t imm = 0;
2900 if (N.getOpcode() == ISD::ADD &&
2901 (!isIntS16Immediate(N.getOperand(1), imm) ||
2902 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2903 Base = N.getOperand(0);
2904 Index = N.getOperand(1);
2905 return true;
2906 }
2907
2908 // Otherwise, do it the hard way, using R0 as the base register.
2909 Base = DAG.getRegister(Subtarget.isPPC64() ?
PPC::ZERO8 : PPC::ZERO, 2910 N.getValueType()); 2911 Index = N; 2912 return true; 2913 } 2914 2915 template <typename Ty> static bool isValidPCRelNode(SDValue N) { 2916 Ty *PCRelCand = dyn_cast<Ty>(N); 2917 return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG); 2918 } 2919 2920 /// Returns true if this address is a PC Relative address. 2921 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG 2922 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR. 2923 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const { 2924 // This is a materialize PC Relative node. Always select this as PC Relative. 2925 Base = N; 2926 if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR) 2927 return true; 2928 if (isValidPCRelNode<ConstantPoolSDNode>(N) || 2929 isValidPCRelNode<GlobalAddressSDNode>(N) || 2930 isValidPCRelNode<JumpTableSDNode>(N) || 2931 isValidPCRelNode<BlockAddressSDNode>(N)) 2932 return true; 2933 return false; 2934 } 2935 2936 /// Returns true if we should use a direct load into vector instruction 2937 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence. 2938 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) { 2939 2940 // If there are any other uses other than scalar to vector, then we should 2941 // keep it as a scalar load -> direct move pattern to prevent multiple 2942 // loads. 2943 LoadSDNode *LD = dyn_cast<LoadSDNode>(N); 2944 if (!LD) 2945 return false; 2946 2947 EVT MemVT = LD->getMemoryVT(); 2948 if (!MemVT.isSimple()) 2949 return false; 2950 switch(MemVT.getSimpleVT().SimpleTy) { 2951 case MVT::i64: 2952 break; 2953 case MVT::i32: 2954 if (!ST.hasP8Vector()) 2955 return false; 2956 break; 2957 case MVT::i16: 2958 case MVT::i8: 2959 if (!ST.hasP9Vector()) 2960 return false; 2961 break; 2962 default: 2963 return false; 2964 } 2965 2966 SDValue LoadedVal(N, 0); 2967 if (!LoadedVal.hasOneUse()) 2968 return false; 2969 2970 for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); 2971 UI != UE; ++UI) 2972 if (UI.getUse().get().getResNo() == 0 && 2973 UI->getOpcode() != ISD::SCALAR_TO_VECTOR && 2974 UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED) 2975 return false; 2976 2977 return true; 2978 } 2979 2980 /// getPreIndexedAddressParts - returns true by value, base pointer and 2981 /// offset pointer and addressing mode by reference if the node's address 2982 /// can be legally represented as pre-indexed load / store address. 2983 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 2984 SDValue &Offset, 2985 ISD::MemIndexedMode &AM, 2986 SelectionDAG &DAG) const { 2987 if (DisablePPCPreinc) return false; 2988 2989 bool isLoad = true; 2990 SDValue Ptr; 2991 EVT VT; 2992 Align Alignment; 2993 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 2994 Ptr = LD->getBasePtr(); 2995 VT = LD->getMemoryVT(); 2996 Alignment = LD->getAlign(); 2997 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 2998 Ptr = ST->getBasePtr(); 2999 VT = ST->getMemoryVT(); 3000 Alignment = ST->getAlign(); 3001 isLoad = false; 3002 } else 3003 return false; 3004 3005 // Do not generate pre-inc forms for specific loads that feed scalar_to_vector 3006 // instructions because we can fold these into a more efficient instruction 3007 // instead, (such as LXSD). 
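// Illustrative example (assuming a Power9 subtarget): an i64 load whose only
// user is a scalar_to_vector can be selected as a single lxsd directly into
// a vector register; committing to a pre-increment form here would instead
// force a load into a GPR followed by a direct move.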
3008 if (isLoad && usePartialVectorLoads(N, Subtarget)) { 3009 return false; 3010 } 3011 3012 // PowerPC doesn't have preinc load/store instructions for vectors 3013 if (VT.isVector()) 3014 return false; 3015 3016 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 3017 // Common code will reject creating a pre-inc form if the base pointer 3018 // is a frame index, or if N is a store and the base pointer is either 3019 // the same as or a predecessor of the value being stored. Check for 3020 // those situations here, and try with swapped Base/Offset instead. 3021 bool Swap = false; 3022 3023 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 3024 Swap = true; 3025 else if (!isLoad) { 3026 SDValue Val = cast<StoreSDNode>(N)->getValue(); 3027 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 3028 Swap = true; 3029 } 3030 3031 if (Swap) 3032 std::swap(Base, Offset); 3033 3034 AM = ISD::PRE_INC; 3035 return true; 3036 } 3037 3038 // LDU/STU can only handle immediates that are a multiple of 4. 3039 if (VT != MVT::i64) { 3040 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None)) 3041 return false; 3042 } else { 3043 // LDU/STU need an address with at least 4-byte alignment. 3044 if (Alignment < Align(4)) 3045 return false; 3046 3047 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4))) 3048 return false; 3049 } 3050 3051 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 3052 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 3053 // sext i32 to i64 when addr mode is r+i. 3054 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 3055 LD->getExtensionType() == ISD::SEXTLOAD && 3056 isa<ConstantSDNode>(Offset)) 3057 return false; 3058 } 3059 3060 AM = ISD::PRE_INC; 3061 return true; 3062 } 3063 3064 //===----------------------------------------------------------------------===// 3065 // LowerOperation implementation 3066 //===----------------------------------------------------------------------===// 3067 3068 /// Return true if we should reference labels using a PICBase, set the HiOpFlags 3069 /// and LoOpFlags to the target MO flags. 3070 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, 3071 unsigned &HiOpFlags, unsigned &LoOpFlags, 3072 const GlobalValue *GV = nullptr) { 3073 HiOpFlags = PPCII::MO_HA; 3074 LoOpFlags = PPCII::MO_LO; 3075 3076 // Don't use the pic base if not in PIC relocation model. 3077 if (IsPIC) { 3078 HiOpFlags |= PPCII::MO_PIC_FLAG; 3079 LoOpFlags |= PPCII::MO_PIC_FLAG; 3080 } 3081 } 3082 3083 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 3084 SelectionDAG &DAG) { 3085 SDLoc DL(HiPart); 3086 EVT PtrVT = HiPart.getValueType(); 3087 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 3088 3089 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 3090 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 3091 3092 // With PIC, the first instruction is actually "GR+hi(&G)". 3093 if (isPIC) 3094 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 3095 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 3096 3097 // Generate non-pic code that has direct accesses to the constant pool. 3098 // The address of the global is just (hi(&g)+lo(&g)). 
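// Sketch of the typical non-PIC materialization on 32-bit targets (exact
// instructions depend on the subtarget and relocation model):
//   lis r3, g@ha    ; PPCISD::Hi, the high-adjusted 16 bits
//   la  r3, g@l(r3) ; PPCISD::Lo, adds the low 16 bits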
3099 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 3100 } 3101 3102 static void setUsesTOCBasePtr(MachineFunction &MF) { 3103 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3104 FuncInfo->setUsesTOCBasePtr(); 3105 } 3106 3107 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 3108 setUsesTOCBasePtr(DAG.getMachineFunction()); 3109 } 3110 3111 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, 3112 SDValue GA) const { 3113 const bool Is64Bit = Subtarget.isPPC64(); 3114 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 3115 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) 3116 : Subtarget.isAIXABI() 3117 ? DAG.getRegister(PPC::R2, VT) 3118 : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 3119 SDValue Ops[] = { GA, Reg }; 3120 return DAG.getMemIntrinsicNode( 3121 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 3122 MachinePointerInfo::getGOT(DAG.getMachineFunction()), None, 3123 MachineMemOperand::MOLoad); 3124 } 3125 3126 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 3127 SelectionDAG &DAG) const { 3128 EVT PtrVT = Op.getValueType(); 3129 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 3130 const Constant *C = CP->getConstVal(); 3131 3132 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 3133 // The actual address of the GlobalValue is stored in the TOC. 3134 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 3135 if (Subtarget.isUsingPCRelativeCalls()) { 3136 SDLoc DL(CP); 3137 EVT Ty = getPointerTy(DAG.getDataLayout()); 3138 SDValue ConstPool = DAG.getTargetConstantPool( 3139 C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG); 3140 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool); 3141 } 3142 setUsesTOCBasePtr(DAG); 3143 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0); 3144 return getTOCEntry(DAG, SDLoc(CP), GA); 3145 } 3146 3147 unsigned MOHiFlag, MOLoFlag; 3148 bool IsPIC = isPositionIndependent(); 3149 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 3150 3151 if (IsPIC && Subtarget.isSVR4ABI()) { 3152 SDValue GA = 3153 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG); 3154 return getTOCEntry(DAG, SDLoc(CP), GA); 3155 } 3156 3157 SDValue CPIHi = 3158 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag); 3159 SDValue CPILo = 3160 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag); 3161 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG); 3162 } 3163 3164 // For 64-bit PowerPC, prefer the more compact relative encodings. 3165 // This trades 32 bits per jump table entry for one or two instructions 3166 // on the jump site. 
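// For example, a 64-entry jump table shrinks from 512 bytes of 64-bit
// pointers to 256 bytes of 32-bit label differences, while each dispatch
// site pays for the extra arithmetic needed to rebase the loaded entry.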
3167 unsigned PPCTargetLowering::getJumpTableEncoding() const { 3168 if (isJumpTableRelative()) 3169 return MachineJumpTableInfo::EK_LabelDifference32; 3170 3171 return TargetLowering::getJumpTableEncoding(); 3172 } 3173 3174 bool PPCTargetLowering::isJumpTableRelative() const { 3175 if (UseAbsoluteJumpTables) 3176 return false; 3177 if (Subtarget.isPPC64() || Subtarget.isAIXABI()) 3178 return true; 3179 return TargetLowering::isJumpTableRelative(); 3180 } 3181 3182 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table, 3183 SelectionDAG &DAG) const { 3184 if (!Subtarget.isPPC64() || Subtarget.isAIXABI()) 3185 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 3186 3187 switch (getTargetMachine().getCodeModel()) { 3188 case CodeModel::Small: 3189 case CodeModel::Medium: 3190 return TargetLowering::getPICJumpTableRelocBase(Table, DAG); 3191 default: 3192 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(), 3193 getPointerTy(DAG.getDataLayout())); 3194 } 3195 } 3196 3197 const MCExpr * 3198 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 3199 unsigned JTI, 3200 MCContext &Ctx) const { 3201 if (!Subtarget.isPPC64() || Subtarget.isAIXABI()) 3202 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 3203 3204 switch (getTargetMachine().getCodeModel()) { 3205 case CodeModel::Small: 3206 case CodeModel::Medium: 3207 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 3208 default: 3209 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); 3210 } 3211 } 3212 3213 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 3214 EVT PtrVT = Op.getValueType(); 3215 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 3216 3217 // isUsingPCRelativeCalls() returns true when PCRelative is enabled 3218 if (Subtarget.isUsingPCRelativeCalls()) { 3219 SDLoc DL(JT); 3220 EVT Ty = getPointerTy(DAG.getDataLayout()); 3221 SDValue GA = 3222 DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG); 3223 SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); 3224 return MatAddr; 3225 } 3226 3227 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 3228 // The actual address of the GlobalValue is stored in the TOC. 
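// Sketch (assumptions noted, not verbatim from this file): under the medium
// code model a TOC_ENTRY node typically becomes a two-instruction
// TOC-relative sequence, conceptually
//   addis rX, r2, sym@toc@ha
//   ld    rX, sym@toc@l(rX)
// with r2 holding the TOC base pointer.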
3229 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 3230 setUsesTOCBasePtr(DAG); 3231 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 3232 return getTOCEntry(DAG, SDLoc(JT), GA); 3233 } 3234 3235 unsigned MOHiFlag, MOLoFlag; 3236 bool IsPIC = isPositionIndependent(); 3237 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 3238 3239 if (IsPIC && Subtarget.isSVR4ABI()) { 3240 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 3241 PPCII::MO_PIC_FLAG); 3242 return getTOCEntry(DAG, SDLoc(GA), GA); 3243 } 3244 3245 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 3246 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 3247 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 3248 } 3249 3250 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 3251 SelectionDAG &DAG) const { 3252 EVT PtrVT = Op.getValueType(); 3253 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 3254 const BlockAddress *BA = BASDN->getBlockAddress(); 3255 3256 // isUsingPCRelativeCalls() returns true when PCRelative is enabled 3257 if (Subtarget.isUsingPCRelativeCalls()) { 3258 SDLoc DL(BASDN); 3259 EVT Ty = getPointerTy(DAG.getDataLayout()); 3260 SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(), 3261 PPCII::MO_PCREL_FLAG); 3262 SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); 3263 return MatAddr; 3264 } 3265 3266 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 3267 // The actual BlockAddress is stored in the TOC. 3268 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 3269 setUsesTOCBasePtr(DAG); 3270 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 3271 return getTOCEntry(DAG, SDLoc(BASDN), GA); 3272 } 3273 3274 // 32-bit position-independent ELF stores the BlockAddress in the .got. 3275 if (Subtarget.is32BitELFABI() && isPositionIndependent()) 3276 return getTOCEntry( 3277 DAG, SDLoc(BASDN), 3278 DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset())); 3279 3280 unsigned MOHiFlag, MOLoFlag; 3281 bool IsPIC = isPositionIndependent(); 3282 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 3283 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 3284 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 3285 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 3286 } 3287 3288 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 3289 SelectionDAG &DAG) const { 3290 if (Subtarget.isAIXABI()) 3291 return LowerGlobalTLSAddressAIX(Op, DAG); 3292 3293 return LowerGlobalTLSAddressLinux(Op, DAG); 3294 } 3295 3296 SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op, 3297 SelectionDAG &DAG) const { 3298 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 3299 3300 if (DAG.getTarget().useEmulatedTLS()) 3301 report_fatal_error("Emulated TLS is not yet supported on AIX"); 3302 3303 SDLoc dl(GA); 3304 const GlobalValue *GV = GA->getGlobal(); 3305 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3306 3307 // The general-dynamic model is the only access model supported for now, so 3308 // all the GlobalTLSAddress nodes are lowered with this model. 3309 // We need to generate two TOC entries, one for the variable offset, one for 3310 // the region handle. The global address for the TOC entry of the region 3311 // handle is created with the MO_TLSGDM_FLAG flag and the global address 3312 // for the TOC entry of the variable offset is created with MO_TLSGD_FLAG. 
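// Conceptually, the DAG built below is
//   TLSGD_AIX(TOC-load of the variable offset, TOC-load of the region handle)
// and the TLSGD_AIX node is later expanded into the runtime call that
// combines the region handle and variable offset into the final address.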
3313 SDValue VariableOffsetTGA = 3314 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGD_FLAG); 3315 SDValue RegionHandleTGA = 3316 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGDM_FLAG); 3317 SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA); 3318 SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA); 3319 return DAG.getNode(PPCISD::TLSGD_AIX, dl, PtrVT, VariableOffset, 3320 RegionHandle); 3321 } 3322 3323 SDValue PPCTargetLowering::LowerGlobalTLSAddressLinux(SDValue Op, 3324 SelectionDAG &DAG) const { 3325 // FIXME: TLS addresses currently use medium model code sequences, 3326 // which is the most useful form. Eventually support for small and 3327 // large models could be added if users need it, at the cost of 3328 // additional complexity. 3329 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 3330 if (DAG.getTarget().useEmulatedTLS()) 3331 return LowerToTLSEmulatedModel(GA, DAG); 3332 3333 SDLoc dl(GA); 3334 const GlobalValue *GV = GA->getGlobal(); 3335 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3336 bool is64bit = Subtarget.isPPC64(); 3337 const Module *M = DAG.getMachineFunction().getFunction().getParent(); 3338 PICLevel::Level picLevel = M->getPICLevel(); 3339 3340 const TargetMachine &TM = getTargetMachine(); 3341 TLSModel::Model Model = TM.getTLSModel(GV); 3342 3343 if (Model == TLSModel::LocalExec) { 3344 if (Subtarget.isUsingPCRelativeCalls()) { 3345 SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64); 3346 SDValue TGA = DAG.getTargetGlobalAddress( 3347 GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG)); 3348 SDValue MatAddr = 3349 DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA); 3350 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr); 3351 } 3352 3353 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 3354 PPCII::MO_TPREL_HA); 3355 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 3356 PPCII::MO_TPREL_LO); 3357 SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64) 3358 : DAG.getRegister(PPC::R2, MVT::i32); 3359 3360 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 3361 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 3362 } 3363 3364 if (Model == TLSModel::InitialExec) { 3365 bool IsPCRel = Subtarget.isUsingPCRelativeCalls(); 3366 SDValue TGA = DAG.getTargetGlobalAddress( 3367 GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0); 3368 SDValue TGATLS = DAG.getTargetGlobalAddress( 3369 GV, dl, PtrVT, 0, 3370 IsPCRel ? 
(PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS); 3371 SDValue TPOffset; 3372 if (IsPCRel) { 3373 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA); 3374 TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel, 3375 MachinePointerInfo()); 3376 } else { 3377 SDValue GOTPtr; 3378 if (is64bit) { 3379 setUsesTOCBasePtr(DAG); 3380 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 3381 GOTPtr = 3382 DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA); 3383 } else { 3384 if (!TM.isPositionIndependent()) 3385 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 3386 else if (picLevel == PICLevel::SmallPIC) 3387 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 3388 else 3389 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 3390 } 3391 TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr); 3392 } 3393 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 3394 } 3395 3396 if (Model == TLSModel::GeneralDynamic) { 3397 if (Subtarget.isUsingPCRelativeCalls()) { 3398 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 3399 PPCII::MO_GOT_TLSGD_PCREL_FLAG); 3400 return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA); 3401 } 3402 3403 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 3404 SDValue GOTPtr; 3405 if (is64bit) { 3406 setUsesTOCBasePtr(DAG); 3407 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 3408 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 3409 GOTReg, TGA); 3410 } else { 3411 if (picLevel == PICLevel::SmallPIC) 3412 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 3413 else 3414 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 3415 } 3416 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 3417 GOTPtr, TGA, TGA); 3418 } 3419 3420 if (Model == TLSModel::LocalDynamic) { 3421 if (Subtarget.isUsingPCRelativeCalls()) { 3422 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 3423 PPCII::MO_GOT_TLSLD_PCREL_FLAG); 3424 SDValue MatPCRel = 3425 DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA); 3426 return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA); 3427 } 3428 3429 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 3430 SDValue GOTPtr; 3431 if (is64bit) { 3432 setUsesTOCBasePtr(DAG); 3433 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 3434 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 3435 GOTReg, TGA); 3436 } else { 3437 if (picLevel == PICLevel::SmallPIC) 3438 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 3439 else 3440 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 3441 } 3442 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 3443 PtrVT, GOTPtr, TGA, TGA); 3444 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 3445 PtrVT, TLSAddr, TGA); 3446 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 3447 } 3448 3449 llvm_unreachable("Unknown TLS model!"); 3450 } 3451 3452 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 3453 SelectionDAG &DAG) const { 3454 EVT PtrVT = Op.getValueType(); 3455 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 3456 SDLoc DL(GSDN); 3457 const GlobalValue *GV = GSDN->getGlobal(); 3458 3459 // 64-bit SVR4 ABI & AIX ABI code is always position-independent. 3460 // The actual address of the GlobalValue is stored in the TOC. 
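// With PC-relative addressing (handled first below), a global that must be
// accessed indirectly is materialized as the address of its GOT slot
// followed by a load of the actual address, while a directly accessible
// global is materialized with a single MAT_PCREL_ADDR (typically a paddi);
// see isAccessedAsGotIndirect for the exact criteria.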
3461 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 3462 if (Subtarget.isUsingPCRelativeCalls()) { 3463 EVT Ty = getPointerTy(DAG.getDataLayout()); 3464 if (isAccessedAsGotIndirect(Op)) { 3465 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(), 3466 PPCII::MO_PCREL_FLAG | 3467 PPCII::MO_GOT_FLAG); 3468 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); 3469 SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel, 3470 MachinePointerInfo()); 3471 return Load; 3472 } else { 3473 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(), 3474 PPCII::MO_PCREL_FLAG); 3475 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA); 3476 } 3477 } 3478 setUsesTOCBasePtr(DAG); 3479 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 3480 return getTOCEntry(DAG, DL, GA); 3481 } 3482 3483 unsigned MOHiFlag, MOLoFlag; 3484 bool IsPIC = isPositionIndependent(); 3485 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 3486 3487 if (IsPIC && Subtarget.isSVR4ABI()) { 3488 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 3489 GSDN->getOffset(), 3490 PPCII::MO_PIC_FLAG); 3491 return getTOCEntry(DAG, DL, GA); 3492 } 3493 3494 SDValue GAHi = 3495 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 3496 SDValue GALo = 3497 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 3498 3499 return LowerLabelRef(GAHi, GALo, IsPIC, DAG); 3500 } 3501 3502 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 3503 bool IsStrict = Op->isStrictFPOpcode(); 3504 ISD::CondCode CC = 3505 cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get(); 3506 SDValue LHS = Op.getOperand(IsStrict ? 1 : 0); 3507 SDValue RHS = Op.getOperand(IsStrict ? 2 : 1); 3508 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); 3509 EVT LHSVT = LHS.getValueType(); 3510 SDLoc dl(Op); 3511 3512 // Soften the setcc with libcall if it is fp128. 3513 if (LHSVT == MVT::f128) { 3514 assert(!Subtarget.hasP9Vector() && 3515 "SETCC for f128 is already legal under Power9!"); 3516 softenSetCCOperands(DAG, LHSVT, LHS, RHS, CC, dl, LHS, RHS, Chain, 3517 Op->getOpcode() == ISD::STRICT_FSETCCS); 3518 if (RHS.getNode()) 3519 LHS = DAG.getNode(ISD::SETCC, dl, Op.getValueType(), LHS, RHS, 3520 DAG.getCondCode(CC)); 3521 if (IsStrict) 3522 return DAG.getMergeValues({LHS, Chain}, dl); 3523 return LHS; 3524 } 3525 3526 assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!"); 3527 3528 if (Op.getValueType() == MVT::v2i64) { 3529 // When the operands themselves are v2i64 values, we need to do something 3530 // special because VSX has no underlying comparison operations for these. 3531 if (LHS.getValueType() == MVT::v2i64) { 3532 // Equality can be handled by casting to the legal type for Altivec 3533 // comparisons, everything else needs to be expanded. 3534 if (CC != ISD::SETEQ && CC != ISD::SETNE) 3535 return SDValue(); 3536 SDValue SetCC32 = DAG.getSetCC( 3537 dl, MVT::v4i32, DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, LHS), 3538 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, RHS), CC); 3539 int ShuffV[] = {1, 0, 3, 2}; 3540 SDValue Shuff = 3541 DAG.getVectorShuffle(MVT::v4i32, dl, SetCC32, SetCC32, ShuffV); 3542 return DAG.getBitcast(MVT::v2i64, 3543 DAG.getNode(CC == ISD::SETEQ ? ISD::AND : ISD::OR, 3544 dl, MVT::v4i32, Shuff, SetCC32)); 3545 } 3546 3547 // We handle most of these in the usual way. 
3548 return Op;
3549 }
3550
3551 // If we're comparing for equality to zero, expose the fact that this is
3552 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3553 // fold the new nodes.
3554 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3555 return V;
3556
3557 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
3558 // Leave comparisons against 0 and -1 alone for now, since they're usually
3559 // optimized. FIXME: revisit this when we can custom lower all setcc
3560 // optimizations.
3561 if (C->isAllOnes() || C->isZero())
3562 return SDValue();
3563 }
3564
3565 // If we have an integer seteq/setne, turn it into a compare against zero
3566 // by xor'ing the rhs with the lhs, which is faster than setting a
3567 // condition register, reading it back out, and masking the correct bit. The
3568 // normal approach here uses sub to do this instead of xor. Using xor exposes
3569 // the result to other bit-twiddling opportunities.
3570 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3571 EVT VT = Op.getValueType();
3572 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, LHS, RHS);
3573 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3574 }
3575 return SDValue();
3576 }
3577
3578 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3579 SDNode *Node = Op.getNode();
3580 EVT VT = Node->getValueType(0);
3581 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3582 SDValue InChain = Node->getOperand(0);
3583 SDValue VAListPtr = Node->getOperand(1);
3584 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3585 SDLoc dl(Node);
3586
3587 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3588
3589 // gpr_index
3590 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3591 VAListPtr, MachinePointerInfo(SV), MVT::i8);
3592 InChain = GprIndex.getValue(1);
3593
3594 if (VT == MVT::i64) {
3595 // Check if GprIndex is even
3596 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3597 DAG.getConstant(1, dl, MVT::i32));
3598 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3599 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3600 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3601 DAG.getConstant(1, dl, MVT::i32));
3602 // Align GprIndex to be even if it isn't
3603 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3604 GprIndex);
3605 }
3606
3607 // fpr index is 1 byte after gpr
3608 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3609 DAG.getConstant(1, dl, MVT::i32));
3610
3611 // fpr
3612 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3613 FprPtr, MachinePointerInfo(SV), MVT::i8);
3614 InChain = FprIndex.getValue(1);
3615
3616 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3617 DAG.getConstant(8, dl, MVT::i32));
3618
3619 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3620 DAG.getConstant(4, dl, MVT::i32));
3621
3622 // Load the overflow area and register save area pointers.
3623 SDValue OverflowArea =
3624 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3625 InChain = OverflowArea.getValue(1);
3626
3627 SDValue RegSaveArea =
3628 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3629 InChain = RegSaveArea.getValue(1);
3630
3631 // select overflow_area if index >= 8
3632 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ?
GprIndex : FprIndex,
3633 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3634
3635 // adjustment constant: {g,f}pr_index * (4 or 8)
3636 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3637 VT.isInteger() ? GprIndex : FprIndex,
3638 DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3639 MVT::i32));
3640
3641 // OurReg = RegSaveArea + RegConstant
3642 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3643 RegConstant);
3644
3645 // Floating types are 32 bytes into RegSaveArea
3646 if (VT.isFloatingPoint())
3647 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3648 DAG.getConstant(32, dl, MVT::i32));
3649
3650 // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3651 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3652 VT.isInteger() ? GprIndex : FprIndex,
3653 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3654 MVT::i32));
3655
3656 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3657 VT.isInteger() ? VAListPtr : FprPtr,
3658 MachinePointerInfo(SV), MVT::i8);
3659
3660 // determine if we should load from reg_save_area or overflow_area
3661 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3662
3663 // increase overflow_area by 4/8 if gpr/fpr index >= 8
3664 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3665 DAG.getConstant(VT.isInteger() ? 4 : 8,
3666 dl, MVT::i32));
3667
3668 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3669 OverflowAreaPlusN);
3670
3671 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3672 MachinePointerInfo(), MVT::i32);
3673
3674 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3675 }
3676
3677 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3678 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3679
3680 // We have to copy the entire va_list struct:
3681 // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
3682 return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3683 DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3684 false, true, false, MachinePointerInfo(),
3685 MachinePointerInfo());
3686 }
3687
3688 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3689 SelectionDAG &DAG) const {
3690 if (Subtarget.isAIXABI())
3691 report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3692
3693 return Op.getOperand(0);
3694 }
3695
3696 SDValue PPCTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
3697 MachineFunction &MF = DAG.getMachineFunction();
3698 PPCFunctionInfo &MFI = *MF.getInfo<PPCFunctionInfo>();
3699
3700 assert((Op.getOpcode() == ISD::INLINEASM ||
3701 Op.getOpcode() == ISD::INLINEASM_BR) &&
3702 "Expecting Inline ASM node.");
3703
3704 // If an LR store is already known to be required then there is no point in
3705 // checking this ASM as well.
3706 if (MFI.isLRStoreRequired())
3707 return Op;
3708
3709 // Inline ASM nodes have an optional last operand that is an incoming Flag of
3710 // type MVT::Glue. We want to ignore this last operand if that is the case.
3711 unsigned NumOps = Op.getNumOperands();
3712 if (Op.getOperand(NumOps - 1).getValueType() == MVT::Glue)
3713 --NumOps;
3714
3715 // Check all operands that may contain the LR.
3716 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
3717 unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue();
3718 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
3719 ++i; // Skip the ID value.
3720 3721 switch (InlineAsm::getKind(Flags)) { 3722 default: 3723 llvm_unreachable("Bad flags!"); 3724 case InlineAsm::Kind_RegUse: 3725 case InlineAsm::Kind_Imm: 3726 case InlineAsm::Kind_Mem: 3727 i += NumVals; 3728 break; 3729 case InlineAsm::Kind_Clobber: 3730 case InlineAsm::Kind_RegDef: 3731 case InlineAsm::Kind_RegDefEarlyClobber: { 3732 for (; NumVals; --NumVals, ++i) { 3733 Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg(); 3734 if (Reg != PPC::LR && Reg != PPC::LR8) 3735 continue; 3736 MFI.setLRStoreRequired(); 3737 return Op; 3738 } 3739 break; 3740 } 3741 } 3742 } 3743 3744 return Op; 3745 } 3746 3747 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 3748 SelectionDAG &DAG) const { 3749 if (Subtarget.isAIXABI()) 3750 report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX."); 3751 3752 SDValue Chain = Op.getOperand(0); 3753 SDValue Trmp = Op.getOperand(1); // trampoline 3754 SDValue FPtr = Op.getOperand(2); // nested function 3755 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 3756 SDLoc dl(Op); 3757 3758 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3759 bool isPPC64 = (PtrVT == MVT::i64); 3760 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 3761 3762 TargetLowering::ArgListTy Args; 3763 TargetLowering::ArgListEntry Entry; 3764 3765 Entry.Ty = IntPtrTy; 3766 Entry.Node = Trmp; Args.push_back(Entry); 3767 3768 // TrampSize == (isPPC64 ? 48 : 40); 3769 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 3770 isPPC64 ? MVT::i64 : MVT::i32); 3771 Args.push_back(Entry); 3772 3773 Entry.Node = FPtr; Args.push_back(Entry); 3774 Entry.Node = Nest; Args.push_back(Entry); 3775 3776 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 3777 TargetLowering::CallLoweringInfo CLI(DAG); 3778 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 3779 CallingConv::C, Type::getVoidTy(*DAG.getContext()), 3780 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args)); 3781 3782 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 3783 return CallResult.second; 3784 } 3785 3786 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 3787 MachineFunction &MF = DAG.getMachineFunction(); 3788 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3789 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3790 3791 SDLoc dl(Op); 3792 3793 if (Subtarget.isPPC64() || Subtarget.isAIXABI()) { 3794 // vastart just stores the address of the VarArgsFrameIndex slot into the 3795 // memory location argument. 3796 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3797 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3798 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 3799 MachinePointerInfo(SV)); 3800 } 3801 3802 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 3803 // We suppose the given va_list is already allocated. 3804 // 3805 // typedef struct { 3806 // char gpr; /* index into the array of 8 GPRs 3807 // * stored in the register save area 3808 // * gpr=0 corresponds to r3, 3809 // * gpr=1 to r4, etc. 3810 // */ 3811 // char fpr; /* index into the array of 8 FPRs 3812 // * stored in the register save area 3813 // * fpr=0 corresponds to f1, 3814 // * fpr=1 to f2, etc. 
3815 // */ 3816 // char *overflow_arg_area; 3817 // /* location on stack that holds 3818 // * the next overflow argument 3819 // */ 3820 // char *reg_save_area; 3821 // /* where r3:r10 and f1:f8 (if saved) 3822 // * are stored 3823 // */ 3824 // } va_list[1]; 3825 3826 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 3827 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 3828 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 3829 PtrVT); 3830 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 3831 PtrVT); 3832 3833 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 3834 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 3835 3836 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 3837 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 3838 3839 uint64_t FPROffset = 1; 3840 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 3841 3842 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3843 3844 // Store first byte : number of int regs 3845 SDValue firstStore = 3846 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 3847 MachinePointerInfo(SV), MVT::i8); 3848 uint64_t nextOffset = FPROffset; 3849 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 3850 ConstFPROffset); 3851 3852 // Store second byte : number of float regs 3853 SDValue secondStore = 3854 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 3855 MachinePointerInfo(SV, nextOffset), MVT::i8); 3856 nextOffset += StackOffset; 3857 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 3858 3859 // Store second word : arguments given on stack 3860 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 3861 MachinePointerInfo(SV, nextOffset)); 3862 nextOffset += FrameOffset; 3863 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 3864 3865 // Store third word : arguments given in registers 3866 return DAG.getStore(thirdStore, dl, FR, nextPtr, 3867 MachinePointerInfo(SV, nextOffset)); 3868 } 3869 3870 /// FPR - The set of FP registers that should be allocated for arguments 3871 /// on Darwin and AIX. 3872 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 3873 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 3874 PPC::F11, PPC::F12, PPC::F13}; 3875 3876 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 3877 /// the stack. 3878 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 3879 unsigned PtrByteSize) { 3880 unsigned ArgSize = ArgVT.getStoreSize(); 3881 if (Flags.isByVal()) 3882 ArgSize = Flags.getByValSize(); 3883 3884 // Round up to multiples of the pointer size, except for array members, 3885 // which are always packed. 3886 if (!Flags.isInConsecutiveRegs()) 3887 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3888 3889 return ArgSize; 3890 } 3891 3892 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 3893 /// on the stack. 3894 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 3895 ISD::ArgFlagsTy Flags, 3896 unsigned PtrByteSize) { 3897 Align Alignment(PtrByteSize); 3898 3899 // Altivec parameters are padded to a 16 byte boundary. 
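// For example, in a call like f(int a, <4 x i32> b), the vector argument is
// not placed at offset 4 or 8 in the parameter area; it starts at the next
// 16-byte boundary, offset 16.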
3900 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3901 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3902 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3903 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3904 Alignment = Align(16); 3905 3906 // ByVal parameters are aligned as requested. 3907 if (Flags.isByVal()) { 3908 auto BVAlign = Flags.getNonZeroByValAlign(); 3909 if (BVAlign > PtrByteSize) { 3910 if (BVAlign.value() % PtrByteSize != 0) 3911 llvm_unreachable( 3912 "ByVal alignment is not a multiple of the pointer size"); 3913 3914 Alignment = BVAlign; 3915 } 3916 } 3917 3918 // Array members are always packed to their original alignment. 3919 if (Flags.isInConsecutiveRegs()) { 3920 // If the array member was split into multiple registers, the first 3921 // needs to be aligned to the size of the full type. (Except for 3922 // ppcf128, which is only aligned as its f64 components.) 3923 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 3924 Alignment = Align(OrigVT.getStoreSize()); 3925 else 3926 Alignment = Align(ArgVT.getStoreSize()); 3927 } 3928 3929 return Alignment; 3930 } 3931 3932 /// CalculateStackSlotUsed - Return whether this argument will use its 3933 /// stack slot (instead of being passed in registers). ArgOffset, 3934 /// AvailableFPRs, and AvailableVRs must hold the current argument 3935 /// position, and will be updated to account for this argument. 3936 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, 3937 unsigned PtrByteSize, unsigned LinkageSize, 3938 unsigned ParamAreaSize, unsigned &ArgOffset, 3939 unsigned &AvailableFPRs, 3940 unsigned &AvailableVRs) { 3941 bool UseMemory = false; 3942 3943 // Respect alignment of argument on the stack. 3944 Align Alignment = 3945 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 3946 ArgOffset = alignTo(ArgOffset, Alignment); 3947 // If there's no space left in the argument save area, we must 3948 // use memory (this check also catches zero-sized arguments). 3949 if (ArgOffset >= LinkageSize + ParamAreaSize) 3950 UseMemory = true; 3951 3952 // Allocate argument on the stack. 3953 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 3954 if (Flags.isInConsecutiveRegsLast()) 3955 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3956 // If we overran the argument save area, we must use memory 3957 // (this check catches arguments passed partially in memory) 3958 if (ArgOffset > LinkageSize + ParamAreaSize) 3959 UseMemory = true; 3960 3961 // However, if the argument is actually passed in an FPR or a VR, 3962 // we don't use memory after all. 3963 if (!Flags.isByVal()) { 3964 if (ArgVT == MVT::f32 || ArgVT == MVT::f64) 3965 if (AvailableFPRs > 0) { 3966 --AvailableFPRs; 3967 return false; 3968 } 3969 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3970 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3971 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3972 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3973 if (AvailableVRs > 0) { 3974 --AvailableVRs; 3975 return false; 3976 } 3977 } 3978 3979 return UseMemory; 3980 } 3981 3982 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 3983 /// ensure minimum alignment required for target. 
3984 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 3985 unsigned NumBytes) { 3986 return alignTo(NumBytes, Lowering->getStackAlign()); 3987 } 3988 3989 SDValue PPCTargetLowering::LowerFormalArguments( 3990 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3991 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3992 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3993 if (Subtarget.isAIXABI()) 3994 return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG, 3995 InVals); 3996 if (Subtarget.is64BitELFABI()) 3997 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, 3998 InVals); 3999 assert(Subtarget.is32BitELFABI()); 4000 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG, 4001 InVals); 4002 } 4003 4004 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4( 4005 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 4006 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4007 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4008 4009 // 32-bit SVR4 ABI Stack Frame Layout: 4010 // +-----------------------------------+ 4011 // +--> | Back chain | 4012 // | +-----------------------------------+ 4013 // | | Floating-point register save area | 4014 // | +-----------------------------------+ 4015 // | | General register save area | 4016 // | +-----------------------------------+ 4017 // | | CR save word | 4018 // | +-----------------------------------+ 4019 // | | VRSAVE save word | 4020 // | +-----------------------------------+ 4021 // | | Alignment padding | 4022 // | +-----------------------------------+ 4023 // | | Vector register save area | 4024 // | +-----------------------------------+ 4025 // | | Local variable space | 4026 // | +-----------------------------------+ 4027 // | | Parameter list area | 4028 // | +-----------------------------------+ 4029 // | | LR save word | 4030 // | +-----------------------------------+ 4031 // SP--> +--- | Back chain | 4032 // +-----------------------------------+ 4033 // 4034 // Specifications: 4035 // System V Application Binary Interface PowerPC Processor Supplement 4036 // AltiVec Technology Programming Interface Manual 4037 4038 MachineFunction &MF = DAG.getMachineFunction(); 4039 MachineFrameInfo &MFI = MF.getFrameInfo(); 4040 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 4041 4042 EVT PtrVT = getPointerTy(MF.getDataLayout()); 4043 // Potential tail calls could cause overwriting of argument stack slots. 4044 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 4045 (CallConv == CallingConv::Fast)); 4046 const Align PtrAlign(4); 4047 4048 // Assign locations to all of the incoming arguments. 4049 SmallVector<CCValAssign, 16> ArgLocs; 4050 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 4051 *DAG.getContext()); 4052 4053 // Reserve space for the linkage area on the stack. 4054 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4055 CCInfo.AllocateStack(LinkageSize, PtrAlign); 4056 if (useSoftFloat()) 4057 CCInfo.PreAnalyzeFormalArguments(Ins); 4058 4059 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 4060 CCInfo.clearWasPPCF128(); 4061 4062 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 4063 CCValAssign &VA = ArgLocs[i]; 4064 4065 // Arguments stored in registers. 
4066 if (VA.isRegLoc()) { 4067 const TargetRegisterClass *RC; 4068 EVT ValVT = VA.getValVT(); 4069 4070 switch (ValVT.getSimpleVT().SimpleTy) { 4071 default: 4072 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 4073 case MVT::i1: 4074 case MVT::i32: 4075 RC = &PPC::GPRCRegClass; 4076 break; 4077 case MVT::f32: 4078 if (Subtarget.hasP8Vector()) 4079 RC = &PPC::VSSRCRegClass; 4080 else if (Subtarget.hasSPE()) 4081 RC = &PPC::GPRCRegClass; 4082 else 4083 RC = &PPC::F4RCRegClass; 4084 break; 4085 case MVT::f64: 4086 if (Subtarget.hasVSX()) 4087 RC = &PPC::VSFRCRegClass; 4088 else if (Subtarget.hasSPE()) 4089 // SPE passes doubles in GPR pairs. 4090 RC = &PPC::GPRCRegClass; 4091 else 4092 RC = &PPC::F8RCRegClass; 4093 break; 4094 case MVT::v16i8: 4095 case MVT::v8i16: 4096 case MVT::v4i32: 4097 RC = &PPC::VRRCRegClass; 4098 break; 4099 case MVT::v4f32: 4100 RC = &PPC::VRRCRegClass; 4101 break; 4102 case MVT::v2f64: 4103 case MVT::v2i64: 4104 RC = &PPC::VRRCRegClass; 4105 break; 4106 } 4107 4108 SDValue ArgValue; 4109 // Transform the arguments stored in physical registers into 4110 // virtual ones. 4111 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) { 4112 assert(i + 1 < e && "No second half of double precision argument"); 4113 Register RegLo = MF.addLiveIn(VA.getLocReg(), RC); 4114 Register RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC); 4115 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32); 4116 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32); 4117 if (!Subtarget.isLittleEndian()) 4118 std::swap (ArgValueLo, ArgValueHi); 4119 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo, 4120 ArgValueHi); 4121 } else { 4122 Register Reg = MF.addLiveIn(VA.getLocReg(), RC); 4123 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 4124 ValVT == MVT::i1 ? MVT::i32 : ValVT); 4125 if (ValVT == MVT::i1) 4126 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 4127 } 4128 4129 InVals.push_back(ArgValue); 4130 } else { 4131 // Argument stored in memory. 4132 assert(VA.isMemLoc()); 4133 4134 // Get the extended size of the argument type in stack 4135 unsigned ArgSize = VA.getLocVT().getStoreSize(); 4136 // Get the actual size of the argument type 4137 unsigned ObjSize = VA.getValVT().getStoreSize(); 4138 unsigned ArgOffset = VA.getLocMemOffset(); 4139 // Stack objects in PPC32 are right justified. 4140 ArgOffset += ArgSize - ObjSize; 4141 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable); 4142 4143 // Create load nodes to retrieve arguments from the stack. 4144 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4145 InVals.push_back( 4146 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 4147 } 4148 } 4149 4150 // Assign locations to all of the incoming aggregate by value arguments. 4151 // Aggregates passed by value are stored in the local variable space of the 4152 // caller's stack frame, right above the parameter list area. 4153 SmallVector<CCValAssign, 16> ByValArgLocs; 4154 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 4155 ByValArgLocs, *DAG.getContext()); 4156 4157 // Reserve stack space for the allocations in CCInfo. 4158 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign); 4159 4160 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 4161 4162 // Area that is at least reserved in the caller of this function. 
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);

  // Set the size that is at least reserved in the caller of this function. A
  // tail call optimized function's reserved stack space needs to be aligned
  // so that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  SmallVector<SDValue, 8> MemOps;

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start.
  if (isVarArg) {
    static const MCPhysReg GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const MCPhysReg FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    unsigned NumFPArgRegs = array_lengthof(FPArgRegs);

    if (useSoftFloat() || hasSPE())
      NumFPArgRegs = 0;

    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));

    // Make room for NumGPArgRegs and NumFPArgRegs.
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits() / 8 +
                NumFPArgRegs * MVT(MVT::f64).getSizeInBits() / 8;

    FuncInfo->setVarArgsStackOffset(
        MFI.CreateFixedObject(PtrVT.getSizeInBits() / 8,
                              CCInfo.getNextStackOffset(), true));

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateStackObject(Depth, Align(8), false));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      Register VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits() / 8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }

    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR
    // bit 6 is set.
    // The double arguments are stored to the VarArgsFrameIndex
    // on the stack.
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      Register VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits() / 8, dl,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

// PPC64 passes i8, i16, and i32 values in i64 registers. Promote the value
// to MVT::i64 and then truncate to the correct register size.
SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
                                             EVT ObjectVT, SelectionDAG &DAG,
                                             SDValue ArgVal,
                                             const SDLoc &dl) const {
  if (Flags.isSExt())
    ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));
  else if (Flags.isZExt())
    ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}

SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 8;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);

  // Do a first pass over the arguments to determine whether the ABI
  // guarantees that our caller has allocated the parameter save area
  // on its stack frame. In the ELFv1 ABI, this is always the case;
  // in the ELFv2 ABI, it is true if this is a vararg function or if
  // any parameter is located in a stack slot.
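  // Illustrative example (not from the ABI text): under ELFv2, a function
  // taking nine integer arguments passes the ninth in a stack slot, so its
  // caller must have allocated the parameter save area; with eight or fewer
  // register arguments and no varargs, the area may be omitted entirely.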

  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
      continue;

    if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs))
      HasParameterArea = true;
  }

  // Add DAG nodes to load the arguments or copy them out of registers. On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  SmallVector<SDValue, 8> MemOps;
  Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    unsigned ObjSize = ObjectVT.getStoreSize();
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    // We re-align the argument offset for each argument, except when using
    // the fast calling convention, when we need to make sure we do that only
    // when we'll actually use a stack slot.
    unsigned CurArgOffset;
    Align Alignment;
    auto ComputeArgOffset = [&]() {
      /* Respect alignment of argument on the stack. */
      Alignment =
          CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = alignTo(ArgOffset, Alignment);
      CurArgOffset = ArgOffset;
    };

    if (CallConv != CallingConv::Fast) {
      ComputeArgOffset();

      /* Compute GPR index associated with argument offset. */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    }

    // FIXME: The codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      if (CallConv == CallingConv::Fast)
        ComputeArgOffset();

      // ObjSize is the true size; ArgSize is ObjSize rounded up to a
      // multiple of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1) / PtrByteSize) * PtrByteSize;
      // Empty aggregate parameters do not take up registers. Examples:
      //   struct { } a;
      //   union { } b;
      //   int c[0];
      // etc. However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }

      // Create a stack object covering all stack doublewords occupied
      // by the argument. If the argument is (fully or partially) on
      // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
      // directly to the caller's stack frame.
      // Otherwise, create a local copy in our own frame.
      int FI;
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
      else
        FI = MFI.CreateStackObject(ArgSize, Alignment, false);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);

      // Handle aggregates smaller than 8 bytes.
      if (ObjSize < PtrByteSize) {
        // The value of the object is its address, which differs from the
        // address of the enclosing doubleword on big-endian systems.
        SDValue Arg = FIN;
        if (!isLittleEndian) {
          SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
          Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
        }
        InVals.push_back(Arg);

        if (GPR_idx != Num_GPR_Regs) {
          Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
          FuncInfo->addLiveInAttr(VReg, Flags);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), ObjSize * 8);
          SDValue Store =
              DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
                                MachinePointerInfo(&*FuncArg), ObjType);
          MemOps.push_back(Store);
        }
        // Whether we copied from a register or not, advance the offset
        // into the parameter save area by a full doubleword.
        ArgOffset += PtrByteSize;
        continue;
      }

      // The value of the object is its address, which is the address of
      // its first stack doubleword.
      InVals.push_back(FIN);

      // Store whatever pieces of the object are in registers to memory.
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          break;

        Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
        SDValue Addr = FIN;
        if (j) {
          SDValue Off = DAG.getConstant(j, dl, PtrVT);
          Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
        }
        unsigned StoreSizeInBits = std::min(PtrByteSize, (ObjSize - j)) * 8;
        EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), StoreSizeInBits);
        SDValue Store =
            DAG.getTruncStore(Val.getValue(1), dl, Val, Addr,
                              MachinePointerInfo(&*FuncArg, j), ObjType);
        MemOps.push_back(Store);
        ++GPR_idx;
      }
      ArgOffset += ArgSize;
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        Register VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly. Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
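      // For illustration (hypothetical IR, not from this file): a Clang
      // coercion such as
      //   define void @f([2 x i64] %a.coerce)
      // reaches this path with two i64 pieces that land in consecutive GPRs
      // instead of being forced through a byval stack slot.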
      if (GPR_idx != Num_GPR_Regs) {
        Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // the value to MVT::i64 and then truncate to the correct register
          // size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // These can be scalar arguments or elements of a float array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // float aggregates.
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx],
                              Subtarget.hasP8Vector()
                                  ? &PPC::VSSRCRegClass
                                  : &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
                                                ? &PPC::VSFRCRegClass
                                                : &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the
        // P8 once we support fp <-> gpr moves.

        // This can only ever happen in the presence of f32 array types,
        // since otherwise we never run out of FPRs before running out
        // of GPRs.
        Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
            ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));
          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
        }

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }

      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array. Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || needsLoad) {
        ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
        ArgOffset += ArgSize;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1) / PtrByteSize) *
                      PtrByteSize;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
      // These can be scalar arguments or elements of a vector array type
      // passed directly. The latter are used to implement ELFv2 homogeneous
      // vector aggregates.
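      // Illustrative example (hypothetical source, not from this file):
      //   struct V2 { vector double a; vector double b; };
      // passed by value under ELFv2 is a homogeneous vector aggregate whose
      // two elements arrive here as separate v2f64 arguments in v2 and v3.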
      if (VR_idx != Num_VR_Regs) {
        Register VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++VR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();
        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 16;
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;

  // Set the size that is at least reserved in the caller of this function.
  // Tail call optimized functions' reserved stack space needs to be aligned
  // so that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start. The ELFv2 ABI spec states:
  //   C programs that are intended to be *portable* across different
  //   compilers and architectures must use the header file <stdarg.h> to
  //   deal with variable argument lists.
  if (isVarArg && MFI.hasVAStart()) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tail call.
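/// For example (illustrative numbers only): if the caller's MinReservedArea
/// is 64 bytes and the tail-called function's parameter area needs 96 bytes,
/// SPDiff is -32 and the stack must be grown by 32 bytes before the call.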
static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}

static bool isFunctionGlobalAddress(SDValue Callee);

static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
                              const TargetMachine &TM) {
  // It does not make sense to call callsShareTOCBase() with a caller that
  // is PC Relative since PC Relative callers do not have a TOC.
#ifndef NDEBUG
  const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
  assert(!STICaller->isUsingPCRelativeCalls() &&
         "PC Relative callers do not have a TOC and cannot share a TOC Base");
#endif

  // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
  // don't have enough information to determine if the caller and callee share
  // the same TOC base, so we have to pessimistically assume they don't for
  // correctness.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G)
    return false;

  const GlobalValue *GV = G->getGlobal();

  // If the callee is preemptable, then the static linker will use a PLT stub
  // which saves the TOC to the stack, and needs a nop after the call
  // instruction to convert to a TOC restore.
  if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
    return false;

  // Functions with PC Relative enabled may clobber the TOC in the same DSO.
  // We may need a TOC restore in the situation where the caller requires a
  // valid TOC but the callee is PC Relative and does not.
  const Function *F = dyn_cast<Function>(GV);
  const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);

  // If we have an Alias we can try to get the function from there.
  if (Alias) {
    const GlobalObject *GlobalObj = Alias->getAliaseeObject();
    F = dyn_cast<Function>(GlobalObj);
  }

  // If we still have no valid function pointer we do not have enough
  // information to determine if the callee uses PC Relative calls so we must
  // assume that it does.
  if (!F)
    return false;

  // If the callee uses PC Relative we cannot guarantee that the callee won't
  // clobber the TOC of the caller and so we must assume that the two
  // functions do not share a TOC base.
  const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
  if (STICallee->isUsingPCRelativeCalls())
    return false;

  // If the GV is not a strong definition then we need to assume it can be
  // replaced by another function at link time. The function that replaces
  // it may not share the same TOC as the caller since the callee may be
  // replaced by a PC Relative version of the same function.
  if (!GV->isStrongDefinitionForLinker())
    return false;

  // The medium and large code models are expected to provide a sufficiently
  // large TOC to provide all data addressing needs of a module with a
  // single TOC.
  if (CodeModel::Medium == TM.getCodeModel() ||
      CodeModel::Large == TM.getCodeModel())
    return true;

  // Any explicitly-specified sections and section prefixes must also match.
  // Also, if we're using -ffunction-sections, then each function is always in
  // a different section (the same is true for COMDAT functions).
  if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
      GV->getSection() != Caller->getSection())
    return false;
  if (const auto *F = dyn_cast<Function>(GV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
      return false;
  }

  return true;
}

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.is64BitELFABI());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg &Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
                               LinkageSize, ParamAreaSize, NumBytes,
                               AvailableFPRs, AvailableVRs))
      return true;
  }
  return false;
}

static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
  if (CB.arg_size() != CallerFn->arg_size())
    return false;

  auto CalleeArgIter = CB.arg_begin();
  auto CalleeArgEnd = CB.arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value *CalleeArg = *CalleeArgIter;
    const Value *CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // The 1st argument of the callee is undef and has the same type as the
    // caller's corresponding argument.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

// Returns true if TCO is possible between the caller's and callee's
// calling conventions.
static bool
areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
                                    CallingConv::ID CalleeCC) {
  // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [](CallingConv::ID CC) {
    return CC == CallingConv::C || CC == CallingConv::Fast;
  };
  if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
    return false;

  // We can safely tail call both fastcc and ccc callees from a c calling
  // convention caller. If the caller is fastcc, we may have less stack space
  // than a non-fastcc caller with the same signature so disable tail-calls in
  // that case.
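  // Illustrative truth table for the check below (ccc = C, fastcc = Fast):
  //   ccc    -> ccc or fastcc : eligible
  //   fastcc -> fastcc        : eligible
  //   fastcc -> ccc           : not eligible (the fastcc caller may have
  //                             less stack space than a ccc caller)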
  return CallerCC == CallingConv::C || CallerCC == CalleeCC;
}

bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
    SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  auto &Caller = DAG.getMachineFunction().getFunction();
  // Check that the calling conventions are compatible for TCO.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
    return false;

  // Callers with any byval parameters are not supported.
  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
    return false;

  // Callees with any byval parameters are not supported either.
  // Note: This is a quick workaround, because in some cases, e.g. when the
  // caller's stack size > the callee's stack size, we are still able to apply
  // sibling call optimization. For example, gcc is able to do SCO for caller1
  // in the following example, but not for caller2.
  //   struct test {
  //     long int a;
  //     char ary[56];
  //   } gTest;
  //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
  //     b->a = v.a;
  //     return 0;
  //   }
  //   void caller1(struct test a, struct test c, struct test *b) {
  //     callee(gTest, b); }
  //   void caller2(struct test *b) { callee(gTest, b); }
  if (any_of(Outs, [](const ISD::OutputArg &OA) { return OA.Flags.isByVal(); }))
    return false;

  // If the callee and caller use different calling conventions, we cannot
  // pass parameters on the stack since the offsets for the parameter area
  // may differ.
  if (Caller.getCallingConv() != CalleeCC &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;

  // All variants of 64-bit ELF ABIs without PC-Relative addressing require
  // that the caller and callee share the same TOC for TCO/SCO. If the caller
  // and callee potentially have different TOC bases then we cannot tail call
  // since we need to restore the TOC pointer after the call.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  // We cannot guarantee this for indirect calls or calls to external
  // functions. When PC-Relative addressing is used, the concept of the TOC is
  // no longer applicable so this check is not required.
  // Check first for indirect calls.
  if (!Subtarget.isUsingPCRelativeCalls() &&
      !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Check if we share the TOC base.
  if (!Subtarget.isUsingPCRelativeCalls() &&
      !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
    return false;

  // TCO allows altering callee ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, then we can
  // apply SCO in this case. If not, we need to check whether the callee
  // needs stack slots for passing arguments.
  // PC Relative tail calls may not have a CallBase.
  // If there is no CallBase we cannot verify if we have the same argument
  // list so assume that we don't have the same argument list.
  if (CB && !hasSameArgumentList(&Caller, *CB) &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;
  else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
    return false;

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG &DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in the same module,
    // hidden or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.

  return DAG
      .getConstant(
          (int)C->getZExtValue() >> 2, SDLoc(Op),
          DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
      .getNode();
}

namespace {

struct TailCallArgumentInfo {
  SDValue Arg;
  SDValue FrameIdxOp;
  int FrameIdx = 0;

  TailCallArgumentInfo() = default;
};

} // end anonymous namespace

/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static void StoreTailCallArgumentsToStackSlot(
    SelectionDAG &DAG, SDValue Chain,
    const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
    SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
    // Store relative to the frame pointer.
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, Arg, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }
}

/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address
/// to the appropriate stack slot for the tail call optimized function call.
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
                                             SDValue OldRetAddr, SDValue OldFP,
                                             int SPDiff, const SDLoc &dl) {
  if (SPDiff) {
    // Calculate the new stack slot for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
    const PPCFrameLowering *FL = Subtarget.getFrameLowering();
    bool isPPC64 = Subtarget.isPPC64();
    int SlotSize = isPPC64 ? 8 : 4;
    int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
    int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
                                                         NewRetAddrLoc, true);
    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
    SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
    Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
                         MachinePointerInfo::getFixedStack(MF, NewRetAddr));
  }
  return Chain;
}

/// CalculateTailCallArgDest - Remember the argument for later processing.
/// Calculate the position of the argument.
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                         SDValue Arg, int SPDiff, unsigned ArgOffset,
                         SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  int Offset = ArgOffset + SPDiff;
  uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
  int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit a load from the frame pointer and
/// return address stack slot. Returns the chain as result and the loaded
/// return address and frame pointer in LROpOut/FPOpOut. Used when tail
/// calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
    SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
    SDValue &FPOpOut, const SDLoc &dl) const {
  if (SPDiff) {
    // Load the LR and FP stack slot for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
    Chain = SDValue(LROpOut.getNode(), 1);
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" of size "Size". Alignment information
/// is specified by the specific parameter attribute. The copy will be passed
/// as a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
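/// For instance (illustrative numbers, assuming 64-bit ELF with eight
/// parameter GPRs): for a 72-byte byval aggregate, the first 64 bytes can
/// travel in r3-r10 and only the trailing 8 bytes need to be copied here.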
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
                       Flags.getNonZeroByValAlign(), false, false, false,
                       MachinePointerInfo(), MachinePointerInfo());
}

/// LowerMemOpCallTo - Store the argument to the stack or remember it in case
/// of tail calls.
static void LowerMemOpCallTo(
    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
    SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
    bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
    SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, dl, PtrVT));
    }
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  } else {
    // Calculate and remember the argument location.
    CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                             TailCallArguments);
  }
}

static void
PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
                const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
                SDValue FPOp,
                SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
  // might overwrite each other in case of tail call optimization.
  SmallVector<SDValue, 8> MemOpChains2;
  // Do not flag preceding copytoreg stuff together with the following stuff.
  InFlag = SDValue();
  StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
                                    MemOpChains2, dl);
  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

  // Store the return address to the appropriate stack slot.
  Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);

  // Emit callseq_end just before the tailcall node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);
}

// Is this global address that of a function that can be called by name? (as
// opposed to something that must hold a descriptor for an indirect call).
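// For illustration: a direct call to a declared function 'foo' yields a
// GlobalAddressSDNode whose global has function type, while a TLS address
// (a GlobalTLSAddress or TargetGlobalTLSAddress node) is rejected below
// even though it is also a GlobalAddressSDNode.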
static bool isFunctionGlobalAddress(SDValue Callee) {
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
        Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
      return false;

    return G->getGlobal()->getValueType()->isFunctionTy();
  }

  return false;
}

SDValue PPCTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());

  CCRetInfo.AnalyzeCallResult(
      Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
               ? RetCC_PPC_Cold
               : RetCC_PPC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val;

    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget.isLittleEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
    } else {
      Val = DAG.getCopyFromReg(Chain, dl,
                               VA.getLocReg(), VA.getLocVT(), InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
                           const PPCSubtarget &Subtarget, bool isPatchPoint) {
  // PatchPoint calls are not indirect.
  if (isPatchPoint)
    return false;

  if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs can not
  // because the immediate function pointer points to a descriptor instead of
  // a function entry point. The ELFv2 ABI cannot use a BLA because the
  // function pointer immediate points to the global entry point, while the
  // BLA would need to jump to the local entry point (see rL211174).
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
      isBLACompatibleAddress(Callee, DAG))
    return false;

  return true;
}

// AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
  return Subtarget.isAIXABI() ||
         (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
}

static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
                              const Function &Caller, const SDValue &Callee,
                              const PPCSubtarget &Subtarget,
                              const TargetMachine &TM,
                              bool IsStrictFPCall = false) {
  if (CFlags.IsTailCall)
    return PPCISD::TC_RETURN;

  unsigned RetOpc = 0;
  // This is a call through a function pointer.
  if (CFlags.IsIndirect) {
    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
    // indirect calls. The save of the caller's TOC pointer to the stack will
    // be inserted into the DAG as part of call lowering. The restore of the
    // TOC pointer is modeled by using a pseudo instruction for the call
    // opcode that represents the 2 instruction sequence of an indirect branch
    // and link, immediately followed by a load of the TOC pointer from the
    // stack save slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not
    // restore the TOC as it is not saved or used.
    RetOpc = isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
                                                 : PPCISD::BCTRL;
  } else if (Subtarget.isUsingPCRelativeCalls()) {
    assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
    RetOpc = PPCISD::CALL_NOTOC;
  } else if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
    // The ABIs that maintain a TOC pointer across calls need to have a nop
    // immediately following the call instruction if the caller and callee may
    // have different TOC bases. At link time if the linker determines the
    // calls may not share a TOC base, the call is redirected to a trampoline
    // inserted by the linker. The trampoline will (among other things) save
    // the caller's TOC pointer at an ABI designated offset in the linkage
    // area and the linker will rewrite the nop to be a load of the TOC
    // pointer from the linkage area into gpr2.
    RetOpc = callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
                                                    : PPCISD::CALL_NOP;
  else
    RetOpc = PPCISD::CALL;
  if (IsStrictFPCall) {
    switch (RetOpc) {
    default:
      llvm_unreachable("Unknown call opcode");
    case PPCISD::BCTRL_LOAD_TOC:
      RetOpc = PPCISD::BCTRL_LOAD_TOC_RM;
      break;
    case PPCISD::BCTRL:
      RetOpc = PPCISD::BCTRL_RM;
      break;
    case PPCISD::CALL_NOTOC:
      RetOpc = PPCISD::CALL_NOTOC_RM;
      break;
    case PPCISD::CALL:
      RetOpc = PPCISD::CALL_RM;
      break;
    case PPCISD::CALL_NOP:
      RetOpc = PPCISD::CALL_NOP_RM;
      break;
    }
  }
  return RetOpc;
}

static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
                               const SDLoc &dl, const PPCSubtarget &Subtarget) {
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
    if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
      return SDValue(Dest, 0);

  // Returns true if the callee is local, and false otherwise.
  auto isLocalCallee = [&]() {
    const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    const GlobalValue *GV = G ? G->getGlobal() : nullptr;

    return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
           !isa_and_nonnull<GlobalIFunc>(GV);
  };

  // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
  // a static relocation model causes some versions of GNU LD (2.17.50, at
  // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
  // built with secure-PLT.
  bool UsePlt =
      Subtarget.is32BitELFABI() && !isLocalCallee() &&
      Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;

  const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
    const TargetMachine &TM = Subtarget.getTargetMachine();
    const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
    MCSymbolXCOFF *S =
        cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));

    MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
    return DAG.getMCSymbol(S, PtrVT);
  };

  if (isFunctionGlobalAddress(Callee)) {
    const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();

    if (Subtarget.isAIXABI()) {
      assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
      return getAIXFuncEntryPointSymbolSDNode(GV);
    }
    return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
                                      UsePlt ? PPCII::MO_PLT : 0);
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *SymName = S->getSymbol();
    if (Subtarget.isAIXABI()) {
      // If there exists a user-declared function whose name is the same as
      // the ExternalSymbol's, then we pick up the user-declared version.
      const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
      if (const Function *F =
              dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
        return getAIXFuncEntryPointSymbolSDNode(F);

      // On AIX, direct function calls reference the symbol for the function's
      // entry point, which is named by prepending a "." before the function's
      // C-linkage name. A Qualname is returned here because an external
      // function entry point is a csect with XTY_ER property.
      const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
        auto &Context = DAG.getMachineFunction().getMMI().getContext();
        MCSectionXCOFF *Sec = Context.getXCOFFSection(
            (Twine(".") + Twine(SymName)).str(), SectionKind::getMetadata(),
            XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER));
        return Sec->getQualNameSymbol();
      };

      SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
    }
    return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
                                       UsePlt ? PPCII::MO_PLT : 0);
  }

  // No transformation needed.
  assert(Callee.getNode() && "What no callee?");
  return Callee;
}

static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
  assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
         "Expected a CALLSEQ_STARTSDNode.");

  // The last operand is the chain, except when the node has glue. If the node
  // has glue, then the last operand is the glue, and the chain is the second
  // last operand.
  SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
  if (LastValue.getValueType() != MVT::Glue)
    return LastValue;

  return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
}

// Creates the node that moves a function's address into the count register
// to prepare for an indirect call instruction.
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                SDValue &Glue, SDValue &Chain,
                                const SDLoc &dl) {
  SDValue MTCTROps[] = {Chain, Callee, Glue};
  EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
  Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
                      makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
  // The glue is the second value produced.
  Glue = Chain.getValue(1);
}

static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                          SDValue &Glue, SDValue &Chain,
                                          SDValue CallSeqStart,
                                          const CallBase *CB, const SDLoc &dl,
                                          bool hasNest,
                                          const PPCSubtarget &Subtarget) {
  // Function pointers in the 64-bit SVR4 ABI do not point to the function
  // entry point, but to the function descriptor (the function entry point
  // address is part of the function descriptor though).
  // The function descriptor is a three doubleword structure with the
  // following fields: function entry point, TOC base address and
  // environment pointer.
  // Thus for a call through a function pointer, the following actions need
  // to be performed:
  //   1. Save the TOC of the caller in the TOC save area of its stack
  //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
  //   2. Load the address of the function entry point from the function
  //      descriptor.
  //   3. Load the TOC of the callee from the function descriptor into r2.
  //   4. Load the environment pointer from the function descriptor into
  //      r11.
  //   5. Branch to the function entry point address.
  //   6. On return of the callee, the TOC of the caller needs to be
  //      restored (this is done in FinishCall()).
  //
  // The loads are scheduled at the beginning of the call sequence, and the
  // register copies are flagged together to ensure that no other
  // operations can be scheduled in between. E.g. without flagging the
  // copies together, a TOC access in the caller could be scheduled between
  // the assignment of the callee TOC and the branch to the callee, which
  // leads to incorrect code.

  // Start by loading the function address from the descriptor.
  SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
  auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
                      ? (MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant)
                      : MachineMemOperand::MONone;

  MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);

  // Registers used in building the DAG.
  const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
  const MCRegister TOCReg = Subtarget.getTOCPointerRegister();

  // Offsets of descriptor members.
  const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
  const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();

  const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
  const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;

  // One load for the function's entry point address.
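  // (Illustrative layout, assuming the 64-bit descriptor shape with 8-byte
  // doublewords: entry point at offset 0, TOC anchor at offset 8, and
  // environment pointer at offset 16. The offsets used below come from the
  // subtarget hooks rather than these hard-coded values.)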
  SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
                                    Alignment, MMOFlags);

  // One for loading the TOC anchor for the module that contains the called
  // function.
  SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
  SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
  SDValue TOCPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddTOC,
                  MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);

  // One for loading the environment pointer.
  SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
  SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
  SDValue LoadEnvPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddPtr,
                  MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);

  // Then copy the newly loaded TOC anchor to the TOC pointer.
  SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
  Chain = TOCVal.getValue(0);
  Glue = TOCVal.getValue(1);

  // If the function call has an explicit 'nest' parameter, it takes the
  // place of the environment pointer.
  assert((!hasNest || !Subtarget.isAIXABI()) &&
         "Nest parameter is not supported on AIX.");
  if (!hasNest) {
    SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
    Chain = EnvVal.getValue(0);
    Glue = EnvVal.getValue(1);
  }

  // The rest of the indirect call sequence is the same as the non-descriptor
  // DAG.
  prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
}

static void
buildCallOperands(SmallVectorImpl<SDValue> &Ops,
                  PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
                  SelectionDAG &DAG,
                  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                  SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
                  const PPCSubtarget &Subtarget) {
  const bool IsPPC64 = Subtarget.isPPC64();
  // MVT for a general purpose register.
  const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

  // First operand is always the chain.
  Ops.push_back(Chain);

  // If it's a direct call, pass the callee as the second operand.
  if (!CFlags.IsIndirect)
    Ops.push_back(Callee);
  else {
    assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");

    // For the TOC-based ABIs, we have saved the TOC pointer to the linkage
    // area on the stack (this would have been done in `LowerCall_64SVR4` or
    // `LowerCall_AIX`). The call instruction is a pseudo instruction that
    // represents both the indirect branch and a load that restores the TOC
    // pointer from the linkage area. The operand for the TOC restore is an
    // add of the TOC save offset to the stack pointer. This must be the
    // second operand: after the chain input but before any other variadic
    // arguments. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as
    // it is not saved or used.
    if (isTOCSaveRestoreRequired(Subtarget)) {
      const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();

      SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
      Ops.push_back(AddTOC);
    }

    // Add the register used for the environment pointer.
    if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
      Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
                                    RegVT));

    // Add the CTR register as callee so a bctr can be emitted later.
    if (CFlags.IsTailCall)
      Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
  }

  // If this is a tail call, add the stack pointer delta.
  if (CFlags.IsTailCall)
    Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
  // no way to mark dependencies as implicit here.
  // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
  if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
      !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
    Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));

  // Add an implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
  if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
    Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If the glue is valid, it is the last operand.
  if (Glue.getNode())
    Ops.push_back(Glue);
}

SDValue PPCTargetLowering::FinishCall(
    CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
    SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
    SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
    unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {

  if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
      Subtarget.isAIXABI())
    setUsesTOCBasePtr(DAG);

  unsigned CallOpc =
      getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
                    Subtarget, DAG.getTarget(), CB ? CB->isStrictFP() : false);

  if (!CFlags.IsIndirect)
    Callee = transformCallee(Callee, DAG, dl, Subtarget);
  else if (Subtarget.usesFunctionDescriptors())
    prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
                                  dl, CFlags.HasNest, Subtarget);
  else
    prepareIndirectCall(DAG, Callee, Glue, Chain, dl);

  // Build the operand list for the call instruction.
  SmallVector<SDValue, 8> Ops;
  buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
                    SPDiff, Subtarget);

  // Emit the tail call.
  if (CFlags.IsTailCall) {
    // Indirect tail calls when using PC Relative calls do not have the same
    // constraints.
5596 assert(((Callee.getOpcode() == ISD::Register &&
5597 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5598 Callee.getOpcode() == ISD::TargetExternalSymbol ||
5599 Callee.getOpcode() == ISD::TargetGlobalAddress ||
5600 isa<ConstantSDNode>(Callee) ||
5601 (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5602 "Expecting a global address, external symbol, absolute value, "
5603 "register or an indirect tail call when PC Relative calls are "
5604 "used.");
5605 // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5606 assert(CallOpc == PPCISD::TC_RETURN &&
5607 "Unexpected call opcode for a tail call.");
5608 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5609 return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5610 }
5611
5612 std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5613 Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5614 DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5615 Glue = Chain.getValue(1);
5616
5617 // When performing tail call optimization the callee pops its arguments off
5618 // the stack. Account for this here so these bytes can be pushed back on in
5619 // PPCFrameLowering::eliminateCallFramePseudoInstr.
5620 int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5621 getTargetMachine().Options.GuaranteedTailCallOpt)
5622 ? NumBytes
5623 : 0;
5624
5625 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5626 DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5627 Glue, dl);
5628 Glue = Chain.getValue(1);
5629
5630 return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5631 DAG, InVals);
5632 }
5633
5634 SDValue
5635 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5636 SmallVectorImpl<SDValue> &InVals) const {
5637 SelectionDAG &DAG = CLI.DAG;
5638 SDLoc &dl = CLI.DL;
5639 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5640 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
5641 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
5642 SDValue Chain = CLI.Chain;
5643 SDValue Callee = CLI.Callee;
5644 bool &isTailCall = CLI.IsTailCall;
5645 CallingConv::ID CallConv = CLI.CallConv;
5646 bool isVarArg = CLI.IsVarArg;
5647 bool isPatchPoint = CLI.IsPatchPoint;
5648 const CallBase *CB = CLI.CB;
5649
5650 if (isTailCall) {
5651 if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5652 isTailCall = false;
5653 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5654 isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5655 Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5656 else
5657 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5658 Ins, DAG);
5659 if (isTailCall) {
5660 ++NumTailCalls;
5661 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5662 ++NumSiblingCalls;
5663
5664 // PC Relative calls no longer guarantee that the callee is a Global
5665 // Address Node. The call could be an indirect tail call, in which case
5666 // the SDValue for the callee could be a load (to load the address of a
5667 // function pointer) or a register copy (to move the address of the
5668 // callee from a function parameter into a virtual register). It may
5669 // also be an ExternalSymbolSDNode (e.g. memcpy).
5670 assert((Subtarget.isUsingPCRelativeCalls() ||
5671 isa<GlobalAddressSDNode>(Callee)) &&
5672 "Callee should be an llvm::Function object.");
5673
5674 LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5675 << "\nTCO callee: ");
5676 LLVM_DEBUG(Callee.dump());
5677 }
5678 }
5679
5680 if (!isTailCall && CB && CB->isMustTailCall())
5681 report_fatal_error("failed to perform tail call elimination on a call "
5682 "site marked musttail");
5683
5684 // When long calls (i.e. indirect calls) are always used, every call is
5685 // made via a function pointer. If we have a function name, first translate
5686 // it into a pointer.
5687 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5688 !isTailCall)
5689 Callee = LowerGlobalAddress(Callee, DAG);
5690
5691 CallFlags CFlags(
5692 CallConv, isTailCall, isVarArg, isPatchPoint,
5693 isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5694 // hasNest
5695 Subtarget.is64BitELFABI() &&
5696 any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5697 CLI.NoMerge);
5698
5699 if (Subtarget.isAIXABI())
5700 return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5701 InVals, CB);
5702
5703 assert(Subtarget.isSVR4ABI());
5704 if (Subtarget.isPPC64())
5705 return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5706 InVals, CB);
5707 return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5708 InVals, CB);
5709 }
5710
5711 SDValue PPCTargetLowering::LowerCall_32SVR4(
5712 SDValue Chain, SDValue Callee, CallFlags CFlags,
5713 const SmallVectorImpl<ISD::OutputArg> &Outs,
5714 const SmallVectorImpl<SDValue> &OutVals,
5715 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5716 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5717 const CallBase *CB) const {
5718 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5719 // of the 32-bit SVR4 ABI stack frame layout.
5720
5721 const CallingConv::ID CallConv = CFlags.CallConv;
5722 const bool IsVarArg = CFlags.IsVarArg;
5723 const bool IsTailCall = CFlags.IsTailCall;
5724
5725 assert((CallConv == CallingConv::C ||
5726 CallConv == CallingConv::Cold ||
5727 CallConv == CallingConv::Fast) && "Unknown calling convention!");
5728
5729 const Align PtrAlign(4);
5730
5731 MachineFunction &MF = DAG.getMachineFunction();
5732
5733 // Mark this function as potentially containing a call that is tail-call
5734 // optimized. As a consequence, the frame pointer will be used for dynamic
5735 // alloca and for restoring the caller's stack pointer in this function's
5736 // epilog. This is needed because the tail-called function might overwrite
5737 // the value in this function's (MF) stack pointer save slot, 0(SP).
5738 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5739 CallConv == CallingConv::Fast)
5740 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5741
5742 // Count how many bytes are to be pushed on the stack, including the linkage
5743 // area, parameter list area and the part of the local variable space which
5744 // contains copies of aggregates which are passed by value.
5745
5746 // Assign locations to all of the outgoing arguments.
5747 SmallVector<CCValAssign, 16> ArgLocs;
5748 PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5749
5750 // Reserve space for the linkage area on the stack.
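// (Illustrative note: for 32-bit SVR4 this is typically just the back-chain
// word plus the LR save word, i.e. 8 bytes, but the exact value is always
// taken from PPCFrameLowering::getLinkageSize().)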
5751 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5752 PtrAlign);
5753 if (useSoftFloat())
5754 CCInfo.PreAnalyzeCallOperands(Outs);
5755
5756 if (IsVarArg) {
5757 // Handle fixed and variable vector arguments differently.
5758 // Fixed vector arguments go into registers as long as registers are
5759 // available. Variable vector arguments always go into memory.
5760 unsigned NumArgs = Outs.size();
5761
5762 for (unsigned i = 0; i != NumArgs; ++i) {
5763 MVT ArgVT = Outs[i].VT;
5764 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5765 bool Result;
5766
5767 if (Outs[i].IsFixed) {
5768 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5769 CCInfo);
5770 } else {
5771 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5772 ArgFlags, CCInfo);
5773 }
5774
5775 if (Result) {
5776 #ifndef NDEBUG
5777 errs() << "Call operand #" << i << " has unhandled type "
5778 << EVT(ArgVT).getEVTString() << "\n";
5779 #endif
5780 llvm_unreachable(nullptr);
5781 }
5782 }
5783 } else {
5784 // All arguments are treated the same.
5785 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5786 }
5787 CCInfo.clearWasPPCF128();
5788
5789 // Assign locations to all of the outgoing aggregate by value arguments.
5790 SmallVector<CCValAssign, 16> ByValArgLocs;
5791 CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5792
5793 // Reserve stack space for the allocations in CCInfo.
5794 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5795
5796 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5797
5798 // Size of the linkage area, parameter list area, and the part of the local
5799 // variable space where copies of by-value aggregates are stored.
5800
5801 unsigned NumBytes = CCByValInfo.getNextStackOffset();
5802
5803 // Calculate by how many bytes the stack has to be adjusted in case of tail
5804 // call optimization.
5805 int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5806
5807 // Adjust the stack pointer for the new arguments...
5808 // These operations are automatically eliminated by the prolog/epilog pass.
5809 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5810 SDValue CallSeqStart = Chain;
5811
5812 // Load the return address and frame pointer so they can be moved somewhere
5813 // else later.
5814 SDValue LROp, FPOp;
5815 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5816
5817 // Set up a copy of the stack pointer for use when loading and storing any
5818 // arguments that may not fit in the registers available for argument
5819 // passing.
5820 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5821
5822 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5823 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5824 SmallVector<SDValue, 8> MemOpChains;
5825
5826 bool seenFloatArg = false;
5827 // Walk the register/memloc assignments, inserting copies/loads.
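// Illustrative example of how the indices below can diverge: an f64 passed
// in a GPR pair under SPE consumes two ArgLocs entries (i advances twice)
// while corresponding to a single source argument (RealArgIdx advances once).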
5828 // i - Tracks the index into the list of registers allocated for the call
5829 // RealArgIdx - Tracks the index into the list of actual function arguments
5830 // j - Tracks the index into the list of byval arguments
5831 for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5832 i != e;
5833 ++i, ++RealArgIdx) {
5834 CCValAssign &VA = ArgLocs[i];
5835 SDValue Arg = OutVals[RealArgIdx];
5836 ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5837
5838 if (Flags.isByVal()) {
5839 // Argument is an aggregate which is passed by value, thus we need to
5840 // create a copy of it in the local variable space of the current stack
5841 // frame (which is the stack frame of the caller) and pass the address of
5842 // this copy to the callee.
5843 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5844 CCValAssign &ByValVA = ByValArgLocs[j++];
5845 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5846
5847 // Memory reserved in the local variable space of the caller's stack frame.
5848 unsigned LocMemOffset = ByValVA.getLocMemOffset();
5849
5850 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5851 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5852 StackPtr, PtrOff);
5853
5854 // Create a copy of the argument in the local area of the current
5855 // stack frame.
5856 SDValue MemcpyCall =
5857 CreateCopyOfByValArgument(Arg, PtrOff,
5858 CallSeqStart.getNode()->getOperand(0),
5859 Flags, DAG, dl);
5860
5861 // This must go outside the CALLSEQ_START..END.
5862 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5863 SDLoc(MemcpyCall));
5864 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5865 NewCallSeqStart.getNode());
5866 Chain = CallSeqStart = NewCallSeqStart;
5867
5868 // Pass the address of the aggregate copy on the stack either in a
5869 // physical register or in the parameter list area of the current stack
5870 // frame to the callee.
5871 Arg = PtrOff;
5872 }
5873
5874 // When useCRBits() is true, there can be i1 arguments.
5875 // This is because getRegisterType(MVT::i1) => MVT::i1,
5876 // while for other integer types getRegisterType() => MVT::i32.
5877 // Extend i1 and ensure the callee will get i32.
5878 if (Arg.getValueType() == MVT::i1)
5879 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5880 dl, MVT::i32, Arg);
5881
5882 if (VA.isRegLoc()) {
5883 seenFloatArg |= VA.getLocVT().isFloatingPoint();
5884 // Put argument in a physical register.
5885 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5886 bool IsLE = Subtarget.isLittleEndian();
5887 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5888 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5889 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5890 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5891 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5892 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5893 SVal.getValue(0)));
5894 } else
5895 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5896 } else {
5897 // Put argument in the parameter list area of the current stack frame.
5898 assert(VA.isMemLoc()); 5899 unsigned LocMemOffset = VA.getLocMemOffset(); 5900 5901 if (!IsTailCall) { 5902 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 5903 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 5904 StackPtr, PtrOff); 5905 5906 MemOpChains.push_back( 5907 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo())); 5908 } else { 5909 // Calculate and remember argument location. 5910 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 5911 TailCallArguments); 5912 } 5913 } 5914 } 5915 5916 if (!MemOpChains.empty()) 5917 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5918 5919 // Build a sequence of copy-to-reg nodes chained together with token chain 5920 // and flag operands which copy the outgoing args into the appropriate regs. 5921 SDValue InFlag; 5922 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5923 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5924 RegsToPass[i].second, InFlag); 5925 InFlag = Chain.getValue(1); 5926 } 5927 5928 // Set CR bit 6 to true if this is a vararg call with floating args passed in 5929 // registers. 5930 if (IsVarArg) { 5931 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 5932 SDValue Ops[] = { Chain, InFlag }; 5933 5934 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 5935 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 5936 5937 InFlag = Chain.getValue(1); 5938 } 5939 5940 if (IsTailCall) 5941 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp, 5942 TailCallArguments); 5943 5944 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart, 5945 Callee, SPDiff, NumBytes, Ins, InVals, CB); 5946 } 5947 5948 // Copy an argument into memory, being careful to do this outside the 5949 // call sequence for the call to which the argument belongs. 5950 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq( 5951 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags, 5952 SelectionDAG &DAG, const SDLoc &dl) const { 5953 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 5954 CallSeqStart.getNode()->getOperand(0), 5955 Flags, DAG, dl); 5956 // The MEMCPY must go outside the CALLSEQ_START..END. 5957 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1); 5958 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0, 5959 SDLoc(MemcpyCall)); 5960 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 5961 NewCallSeqStart.getNode()); 5962 return NewCallSeqStart; 5963 } 5964 5965 SDValue PPCTargetLowering::LowerCall_64SVR4( 5966 SDValue Chain, SDValue Callee, CallFlags CFlags, 5967 const SmallVectorImpl<ISD::OutputArg> &Outs, 5968 const SmallVectorImpl<SDValue> &OutVals, 5969 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 5970 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 5971 const CallBase *CB) const { 5972 bool isELFv2ABI = Subtarget.isELFv2ABI(); 5973 bool isLittleEndian = Subtarget.isLittleEndian(); 5974 unsigned NumOps = Outs.size(); 5975 bool IsSibCall = false; 5976 bool IsFastCall = CFlags.CallConv == CallingConv::Fast; 5977 5978 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5979 unsigned PtrByteSize = 8; 5980 5981 MachineFunction &MF = DAG.getMachineFunction(); 5982 5983 if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt) 5984 IsSibCall = true; 5985 5986 // Mark this function as potentially containing a function that contains a 5987 // tail call. 
As a consequence, the frame pointer will be used for dynamic
5988 // alloca and for restoring the caller's stack pointer in this function's
5989 // epilog. This is needed because the tail-called function might overwrite
5990 // the value in this function's (MF) stack pointer save slot, 0(SP).
5991 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5992 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5993
5994 assert(!(IsFastCall && CFlags.IsVarArg) &&
5995 "fastcc not supported on varargs functions");
5996
5997 // Count how many bytes are to be pushed on the stack, including the linkage
5998 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
5999 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
6000 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
6001 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6002 unsigned NumBytes = LinkageSize;
6003 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6004
6005 static const MCPhysReg GPR[] = {
6006 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6007 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6008 };
6009 static const MCPhysReg VR[] = {
6010 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6011 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6012 };
6013
6014 const unsigned NumGPRs = array_lengthof(GPR);
6015 const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
6016 const unsigned NumVRs = array_lengthof(VR);
6017
6018 // On ELFv2, we can avoid allocating the parameter area if all the arguments
6019 // can be passed to the callee in registers.
6020 // For the fast calling convention, there is another check below.
6021 // Note: We should keep this consistent with LowerFormalArguments_64SVR4().
6022 bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
6023 if (!HasParameterArea) {
6024 unsigned ParamAreaSize = NumGPRs * PtrByteSize;
6025 unsigned AvailableFPRs = NumFPRs;
6026 unsigned AvailableVRs = NumVRs;
6027 unsigned NumBytesTmp = NumBytes;
6028 for (unsigned i = 0; i != NumOps; ++i) {
6029 if (Outs[i].Flags.isNest()) continue;
6030 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
6031 PtrByteSize, LinkageSize, ParamAreaSize,
6032 NumBytesTmp, AvailableFPRs, AvailableVRs))
6033 HasParameterArea = true;
6034 }
6035 }
6036
6037 // When using the fast calling convention, we don't provide backing for
6038 // arguments that will be in registers.
6039 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
6040
6041 // Avoid allocating parameter area for fastcc functions if all the arguments
6042 // can be passed in the registers.
6043 if (IsFastCall)
6044 HasParameterArea = false;
6045
6046 // Add up all the space actually used.
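// Illustrative (ELFv1, non-fastcc) example for the loop below: ten i64
// arguments each reserve an 8-byte slot, so NumBytes becomes
// LinkageSize + 80 even though the first eight are passed in X3-X10; their
// slots merely shadow the registers in the parameter save area.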
6047 for (unsigned i = 0; i != NumOps; ++i) {
6048 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6049 EVT ArgVT = Outs[i].VT;
6050 EVT OrigVT = Outs[i].ArgVT;
6051
6052 if (Flags.isNest())
6053 continue;
6054
6055 if (IsFastCall) {
6056 if (Flags.isByVal()) {
6057 NumGPRsUsed += (Flags.getByValSize()+7)/8;
6058 if (NumGPRsUsed > NumGPRs)
6059 HasParameterArea = true;
6060 } else {
6061 switch (ArgVT.getSimpleVT().SimpleTy) {
6062 default: llvm_unreachable("Unexpected ValueType for argument!");
6063 case MVT::i1:
6064 case MVT::i32:
6065 case MVT::i64:
6066 if (++NumGPRsUsed <= NumGPRs)
6067 continue;
6068 break;
6069 case MVT::v4i32:
6070 case MVT::v8i16:
6071 case MVT::v16i8:
6072 case MVT::v2f64:
6073 case MVT::v2i64:
6074 case MVT::v1i128:
6075 case MVT::f128:
6076 if (++NumVRsUsed <= NumVRs)
6077 continue;
6078 break;
6079 case MVT::v4f32:
6080 if (++NumVRsUsed <= NumVRs)
6081 continue;
6082 break;
6083 case MVT::f32:
6084 case MVT::f64:
6085 if (++NumFPRsUsed <= NumFPRs)
6086 continue;
6087 break;
6088 }
6089 HasParameterArea = true;
6090 }
6091 }
6092
6093 /* Respect alignment of argument on the stack. */
6094 auto Alignment =
6095 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6096 NumBytes = alignTo(NumBytes, Alignment);
6097
6098 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6099 if (Flags.isInConsecutiveRegsLast())
6100 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6101 }
6102
6103 unsigned NumBytesActuallyUsed = NumBytes;
6104
6105 // In the old ELFv1 ABI, the prolog code of the callee may store up to 8
6106 // GPR argument registers to the stack, allowing va_start to index over
6107 // them in memory if the function is varargs.
6108 // Because we cannot tell if this is needed on the caller side, we have to
6109 // conservatively assume that it is needed. As such, make sure we have at
6110 // least enough stack space for the callee to store the 8 GPRs.
6111 // In the ELFv2 ABI, we allocate the parameter area iff a callee
6112 // really requires memory operands, e.g. a vararg function.
6113 if (HasParameterArea)
6114 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6115 else
6116 NumBytes = LinkageSize;
6117
6118 // Tail call needs the stack to be aligned.
6119 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6120 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6121
6122 int SPDiff = 0;
6123
6124 // Calculate by how many bytes the stack has to be adjusted in case of tail
6125 // call optimization.
6126 if (!IsSibCall)
6127 SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6128
6129 // To protect arguments on the stack from being clobbered in a tail call,
6130 // force all the loads to happen before doing any other lowering.
6131 if (CFlags.IsTailCall)
6132 Chain = DAG.getStackArgumentTokenFactor(Chain);
6133
6134 // Adjust the stack pointer for the new arguments...
6135 // These operations are automatically eliminated by the prolog/epilog pass.
6136 if (!IsSibCall)
6137 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6138 SDValue CallSeqStart = Chain;
6139
6140 // Load the return address and frame pointer so they can be moved somewhere
6141 // else later.
6142 SDValue LROp, FPOp;
6143 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6144
6145 // Set up a copy of the stack pointer for use when loading and storing any
6146 // arguments that may not fit in the registers available for argument
6147 // passing.
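// (X1 is the 64-bit stack pointer register; its 32-bit counterpart R1 is
// used in LowerCall_32SVR4 above.)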
6148 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 6149 6150 // Figure out which arguments are going to go in registers, and which in 6151 // memory. Also, if this is a vararg function, floating point operations 6152 // must be stored to our stack, and loaded into integer regs as well, if 6153 // any integer regs are available for argument passing. 6154 unsigned ArgOffset = LinkageSize; 6155 6156 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6157 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6158 6159 SmallVector<SDValue, 8> MemOpChains; 6160 for (unsigned i = 0; i != NumOps; ++i) { 6161 SDValue Arg = OutVals[i]; 6162 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6163 EVT ArgVT = Outs[i].VT; 6164 EVT OrigVT = Outs[i].ArgVT; 6165 6166 // PtrOff will be used to store the current argument to the stack if a 6167 // register cannot be found for it. 6168 SDValue PtrOff; 6169 6170 // We re-align the argument offset for each argument, except when using the 6171 // fast calling convention, when we need to make sure we do that only when 6172 // we'll actually use a stack slot. 6173 auto ComputePtrOff = [&]() { 6174 /* Respect alignment of argument on the stack. */ 6175 auto Alignment = 6176 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 6177 ArgOffset = alignTo(ArgOffset, Alignment); 6178 6179 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6180 6181 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6182 }; 6183 6184 if (!IsFastCall) { 6185 ComputePtrOff(); 6186 6187 /* Compute GPR index associated with argument offset. */ 6188 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 6189 GPR_idx = std::min(GPR_idx, NumGPRs); 6190 } 6191 6192 // Promote integers to 64-bit values. 6193 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 6194 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6195 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6196 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6197 } 6198 6199 // FIXME memcpy is used way more than necessary. Correctness first. 6200 // Note: "by value" is code for passing a structure by value, not 6201 // basic types. 6202 if (Flags.isByVal()) { 6203 // Note: Size includes alignment padding, so 6204 // struct x { short a; char b; } 6205 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 6206 // These are the proper values we need for right-justifying the 6207 // aggregate in a parameter register. 6208 unsigned Size = Flags.getByValSize(); 6209 6210 // An empty aggregate parameter takes up no storage and no 6211 // registers. 6212 if (Size == 0) 6213 continue; 6214 6215 if (IsFastCall) 6216 ComputePtrOff(); 6217 6218 // All aggregates smaller than 8 bytes must be passed right-justified. 6219 if (Size==1 || Size==2 || Size==4) { 6220 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? 
MVT::i16 : MVT::i32); 6221 if (GPR_idx != NumGPRs) { 6222 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6223 MachinePointerInfo(), VT); 6224 MemOpChains.push_back(Load.getValue(1)); 6225 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6226 6227 ArgOffset += PtrByteSize; 6228 continue; 6229 } 6230 } 6231 6232 if (GPR_idx == NumGPRs && Size < 8) { 6233 SDValue AddPtr = PtrOff; 6234 if (!isLittleEndian) { 6235 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6236 PtrOff.getValueType()); 6237 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6238 } 6239 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6240 CallSeqStart, 6241 Flags, DAG, dl); 6242 ArgOffset += PtrByteSize; 6243 continue; 6244 } 6245 // Copy the object to parameter save area if it can not be entirely passed 6246 // by registers. 6247 // FIXME: we only need to copy the parts which need to be passed in 6248 // parameter save area. For the parts passed by registers, we don't need 6249 // to copy them to the stack although we need to allocate space for them 6250 // in parameter save area. 6251 if ((NumGPRs - GPR_idx) * PtrByteSize < Size) 6252 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6253 CallSeqStart, 6254 Flags, DAG, dl); 6255 6256 // When a register is available, pass a small aggregate right-justified. 6257 if (Size < 8 && GPR_idx != NumGPRs) { 6258 // The easiest way to get this right-justified in a register 6259 // is to copy the structure into the rightmost portion of a 6260 // local variable slot, then load the whole slot into the 6261 // register. 6262 // FIXME: The memcpy seems to produce pretty awful code for 6263 // small aggregates, particularly for packed ones. 6264 // FIXME: It would be preferable to use the slot in the 6265 // parameter save area instead of a new local variable. 6266 SDValue AddPtr = PtrOff; 6267 if (!isLittleEndian) { 6268 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 6269 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6270 } 6271 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6272 CallSeqStart, 6273 Flags, DAG, dl); 6274 6275 // Load the slot into the register. 6276 SDValue Load = 6277 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 6278 MemOpChains.push_back(Load.getValue(1)); 6279 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6280 6281 // Done with this argument. 6282 ArgOffset += PtrByteSize; 6283 continue; 6284 } 6285 6286 // For aggregates larger than PtrByteSize, copy the pieces of the 6287 // object that fit into registers from the parameter save area. 
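// Illustrative example: a 20-byte aggregate with at least three free GPRs
// is emitted below as two 8-byte loads plus one 4-byte extending load
// (j = 0, 8, 16); if the GPRs run out mid-object, ArgOffset is advanced
// past the memory-resident remainder and the loop exits.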
6288 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6289 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6290 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6291 if (GPR_idx != NumGPRs) { 6292 unsigned LoadSizeInBits = std::min(PtrByteSize, (Size - j)) * 8; 6293 EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), LoadSizeInBits); 6294 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, AddArg, 6295 MachinePointerInfo(), ObjType); 6296 6297 MemOpChains.push_back(Load.getValue(1)); 6298 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6299 ArgOffset += PtrByteSize; 6300 } else { 6301 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6302 break; 6303 } 6304 } 6305 continue; 6306 } 6307 6308 switch (Arg.getSimpleValueType().SimpleTy) { 6309 default: llvm_unreachable("Unexpected ValueType for argument!"); 6310 case MVT::i1: 6311 case MVT::i32: 6312 case MVT::i64: 6313 if (Flags.isNest()) { 6314 // The 'nest' parameter, if any, is passed in R11. 6315 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 6316 break; 6317 } 6318 6319 // These can be scalar arguments or elements of an integer array type 6320 // passed directly. Clang may use those instead of "byval" aggregate 6321 // types to avoid forcing arguments to memory unnecessarily. 6322 if (GPR_idx != NumGPRs) { 6323 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 6324 } else { 6325 if (IsFastCall) 6326 ComputePtrOff(); 6327 6328 assert(HasParameterArea && 6329 "Parameter area must exist to pass an argument in memory."); 6330 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6331 true, CFlags.IsTailCall, false, MemOpChains, 6332 TailCallArguments, dl); 6333 if (IsFastCall) 6334 ArgOffset += PtrByteSize; 6335 } 6336 if (!IsFastCall) 6337 ArgOffset += PtrByteSize; 6338 break; 6339 case MVT::f32: 6340 case MVT::f64: { 6341 // These can be scalar arguments or elements of a float array type 6342 // passed directly. The latter are used to implement ELFv2 homogenous 6343 // float aggregates. 6344 6345 // Named arguments go into FPRs first, and once they overflow, the 6346 // remaining arguments go into GPRs and then the parameter save area. 6347 // Unnamed arguments for vararg functions always go to GPRs and 6348 // then the parameter save area. For now, put all arguments to vararg 6349 // routines always in both locations (FPR *and* GPR or stack slot). 6350 bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs; 6351 bool NeededLoad = false; 6352 6353 // First load the argument into the next available FPR. 6354 if (FPR_idx != NumFPRs) 6355 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 6356 6357 // Next, load the argument into GPR or stack slot if needed. 6358 if (!NeedGPROrStack) 6359 ; 6360 else if (GPR_idx != NumGPRs && !IsFastCall) { 6361 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 6362 // once we support fp <-> gpr moves. 6363 6364 // In the non-vararg case, this can only ever happen in the 6365 // presence of f32 array types, since otherwise we never run 6366 // out of FPRs before running out of GPRs. 6367 SDValue ArgVal; 6368 6369 // Double values are always passed in a single GPR. 6370 if (Arg.getValueType() != MVT::f32) { 6371 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 6372 6373 // Non-array float values are extended and passed in a GPR. 
} else if (!Flags.isInConsecutiveRegs()) {
6375 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6376 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6377
6378 // If we have an array of floats, we collect every odd element
6379 // together with its predecessor into one GPR.
6380 } else if (ArgOffset % PtrByteSize != 0) {
6381 SDValue Lo, Hi;
6382 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6383 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6384 if (!isLittleEndian)
6385 std::swap(Lo, Hi);
6386 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6387
6388 // The final element, if even, goes into the first half of a GPR.
6389 } else if (Flags.isInConsecutiveRegsLast()) {
6390 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6391 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6392 if (!isLittleEndian)
6393 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6394 DAG.getConstant(32, dl, MVT::i32));
6395
6396 // Non-final even elements are skipped; they will be handled
6397 // together with the subsequent argument on the next go-around.
6398 } else
6399 ArgVal = SDValue();
6400
6401 if (ArgVal.getNode())
6402 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6403 } else {
6404 if (IsFastCall)
6405 ComputePtrOff();
6406
6407 // Single-precision floating-point values are mapped to the
6408 // second (rightmost) word of the stack doubleword.
6409 if (Arg.getValueType() == MVT::f32 &&
6410 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6411 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6412 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6413 }
6414
6415 assert(HasParameterArea &&
6416 "Parameter area must exist to pass an argument in memory.");
6417 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6418 true, CFlags.IsTailCall, false, MemOpChains,
6419 TailCallArguments, dl);
6420
6421 NeededLoad = true;
6422 }
6423 // When passing an array of floats, the array occupies consecutive
6424 // space in the argument area; only round up to the next doubleword
6425 // at the end of the array. Otherwise, each float takes 8 bytes.
6426 if (!IsFastCall || NeededLoad) {
6427 ArgOffset += (Arg.getValueType() == MVT::f32 &&
6428 Flags.isInConsecutiveRegs()) ? 4 : 8;
6429 if (Flags.isInConsecutiveRegsLast())
6430 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6431 }
6432 break;
6433 }
6434 case MVT::v4f32:
6435 case MVT::v4i32:
6436 case MVT::v8i16:
6437 case MVT::v16i8:
6438 case MVT::v2f64:
6439 case MVT::v2i64:
6440 case MVT::v1i128:
6441 case MVT::f128:
6442 // These can be scalar arguments or elements of a vector array type
6443 // passed directly. The latter are used to implement ELFv2 homogenous
6444 // vector aggregates.
6445
6446 // For a varargs call, named arguments go into VRs or on the stack as
6447 // usual; unnamed arguments always go to the stack or the corresponding
6448 // GPRs when within range. For now, we always put the value in both
6449 // locations (or even all three).
6450 if (CFlags.IsVarArg) {
6451 assert(HasParameterArea &&
6452 "Parameter area must exist if we have a varargs call.");
6453 // We could elide this store in the case where the object fits
6454 // entirely in R registers. Maybe later.
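// Sketch of the vararg path below: the vector is stored to its parameter
// save area slot, reloaded into a VR if one remains, and additionally
// reloaded word by word into any free GPRs, so the callee may find the
// value in up to three places.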
6455 SDValue Store =
6456 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6457 MemOpChains.push_back(Store);
6458 if (VR_idx != NumVRs) {
6459 SDValue Load =
6460 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6461 MemOpChains.push_back(Load.getValue(1));
6462 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6463 }
6464 ArgOffset += 16;
6465 for (unsigned i=0; i<16; i+=PtrByteSize) {
6466 if (GPR_idx == NumGPRs)
6467 break;
6468 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6469 DAG.getConstant(i, dl, PtrVT));
6470 SDValue Load =
6471 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6472 MemOpChains.push_back(Load.getValue(1));
6473 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6474 }
6475 break;
6476 }
6477
6478 // Non-varargs Altivec params go into VRs or on the stack.
6479 if (VR_idx != NumVRs) {
6480 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6481 } else {
6482 if (IsFastCall)
6483 ComputePtrOff();
6484
6485 assert(HasParameterArea &&
6486 "Parameter area must exist to pass an argument in memory.");
6487 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6488 true, CFlags.IsTailCall, true, MemOpChains,
6489 TailCallArguments, dl);
6490 if (IsFastCall)
6491 ArgOffset += 16;
6492 }
6493
6494 if (!IsFastCall)
6495 ArgOffset += 16;
6496 break;
6497 }
6498 }
6499
6500 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6501 "mismatch in size of parameter area");
6502 (void)NumBytesActuallyUsed;
6503
6504 if (!MemOpChains.empty())
6505 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6506
6507 // Check if this is an indirect call (MTCTR/BCTRL).
6508 // See prepareDescriptorIndirectCall and buildCallOperands for more
6509 // information about calls through function pointers in the 64-bit SVR4 ABI.
6510 if (CFlags.IsIndirect) {
6511 // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6512 // caller in the TOC save area.
6513 if (isTOCSaveRestoreRequired(Subtarget)) {
6514 assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
6515 // Load r2 into a virtual register and store it to the TOC save area.
6516 setUsesTOCBasePtr(DAG);
6517 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6518 // TOC save area offset.
6519 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6520 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6521 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6522 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6523 MachinePointerInfo::getStack(
6524 DAG.getMachineFunction(), TOCSaveOffset));
6525 }
6526 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6527 // This does not mean the MTCTR instruction must use R12; it's easier
6528 // to model this as an extra parameter, so do that.
6529 if (isELFv2ABI && !CFlags.IsPatchPoint)
6530 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6531 }
6532
6533 // Build a sequence of copy-to-reg nodes chained together with token chain
6534 // and flag operands which copy the outgoing args into the appropriate regs.
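// The copies below are chained through InFlag glue so that the register
// assignments stay contiguous and immediately precede the call itself.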
6535 SDValue InFlag;
6536 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6537 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6538 RegsToPass[i].second, InFlag);
6539 InFlag = Chain.getValue(1);
6540 }
6541
6542 if (CFlags.IsTailCall && !IsSibCall)
6543 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6544 TailCallArguments);
6545
6546 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6547 Callee, SPDiff, NumBytes, Ins, InVals, CB);
6548 }
6549
6550 // Returns true when the shadow of a general purpose argument register
6551 // in the parameter save area is aligned to at least 'RequiredAlign'.
6552 static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {
6553 assert(RequiredAlign.value() <= 16 &&
6554 "Required alignment greater than stack alignment.");
6555 switch (Reg) {
6556 default:
6557 report_fatal_error("called on invalid register.");
6558 case PPC::R5:
6559 case PPC::R9:
6560 case PPC::X3:
6561 case PPC::X5:
6562 case PPC::X7:
6563 case PPC::X9:
6564 // These registers are 16-byte aligned, which is the strictest alignment
6565 // we can support.
6566 return true;
6567 case PPC::R3:
6568 case PPC::R7:
6569 case PPC::X4:
6570 case PPC::X6:
6571 case PPC::X8:
6572 case PPC::X10:
6573 // The shadow of these registers in the PSA is 8-byte aligned.
6574 return RequiredAlign <= 8;
6575 case PPC::R4:
6576 case PPC::R6:
6577 case PPC::R8:
6578 case PPC::R10:
6579 return RequiredAlign <= 4;
6580 }
6581 }
6582
6583 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6584 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6585 CCState &S) {
6586 AIXCCState &State = static_cast<AIXCCState &>(S);
6587 const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6588 State.getMachineFunction().getSubtarget());
6589 const bool IsPPC64 = Subtarget.isPPC64();
6590 const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6591 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6592
6593 if (ValVT == MVT::f128)
6594 report_fatal_error("f128 is unimplemented on AIX.");
6595
6596 if (ArgFlags.isNest())
6597 report_fatal_error("Nest arguments are unimplemented.");
6598
6599 static const MCPhysReg GPR_32[] = {// 32-bit registers.
6600 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6601 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6602 static const MCPhysReg GPR_64[] = {// 64-bit registers.
6603 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6604 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6605
6606 static const MCPhysReg VR[] = {// Vector registers.
6607 PPC::V2, PPC::V3, PPC::V4, PPC::V5,
6608 PPC::V6, PPC::V7, PPC::V8, PPC::V9,
6609 PPC::V10, PPC::V11, PPC::V12, PPC::V13};
6610
6611 if (ArgFlags.isByVal()) {
6612 if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6613 report_fatal_error("Pass-by-value arguments with alignment greater than "
6614 "register width are not supported.");
6615
6616 const unsigned ByValSize = ArgFlags.getByValSize();
6617
6618 // An empty aggregate parameter takes up no storage and no registers,
6619 // but needs a MemLoc for a stack slot for the formal arguments side.
6620 if (ByValSize == 0) {
6621 State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6622 State.getNextStackOffset(), RegVT,
6623 LocInfo));
6624 return false;
6625 }
6626
6627 const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6628 unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6629 for (const unsigned E = Offset + StackSize; Offset < E;
6630 Offset += PtrAlign.value()) {
6631 if (unsigned Reg = State.AllocateReg(IsPPC64 ?
GPR_64 : GPR_32)) 6632 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo)); 6633 else { 6634 State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE, 6635 Offset, MVT::INVALID_SIMPLE_VALUE_TYPE, 6636 LocInfo)); 6637 break; 6638 } 6639 } 6640 return false; 6641 } 6642 6643 // Arguments always reserve parameter save area. 6644 switch (ValVT.SimpleTy) { 6645 default: 6646 report_fatal_error("Unhandled value type for argument."); 6647 case MVT::i64: 6648 // i64 arguments should have been split to i32 for PPC32. 6649 assert(IsPPC64 && "PPC32 should have split i64 values."); 6650 LLVM_FALLTHROUGH; 6651 case MVT::i1: 6652 case MVT::i32: { 6653 const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign); 6654 // AIX integer arguments are always passed in register width. 6655 if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits()) 6656 LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt 6657 : CCValAssign::LocInfo::ZExt; 6658 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) 6659 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo)); 6660 else 6661 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo)); 6662 6663 return false; 6664 } 6665 case MVT::f32: 6666 case MVT::f64: { 6667 // Parameter save area (PSA) is reserved even if the float passes in fpr. 6668 const unsigned StoreSize = LocVT.getStoreSize(); 6669 // Floats are always 4-byte aligned in the PSA on AIX. 6670 // This includes f64 in 64-bit mode for ABI compatibility. 6671 const unsigned Offset = 6672 State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4)); 6673 unsigned FReg = State.AllocateReg(FPR); 6674 if (FReg) 6675 State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo)); 6676 6677 // Reserve and initialize GPRs or initialize the PSA as required. 6678 for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) { 6679 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) { 6680 assert(FReg && "An FPR should be available when a GPR is reserved."); 6681 if (State.isVarArg()) { 6682 // Successfully reserved GPRs are only initialized for vararg calls. 6683 // Custom handling is required for: 6684 // f64 in PPC32 needs to be split into 2 GPRs. 6685 // f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR. 6686 State.addLoc( 6687 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo)); 6688 } 6689 } else { 6690 // If there are insufficient GPRs, the PSA needs to be initialized. 6691 // Initialization occurs even if an FPR was initialized for 6692 // compatibility with the AIX XL compiler. The full memory for the 6693 // argument will be initialized even if a prior word is saved in GPR. 6694 // A custom memLoc is used when the argument also passes in FPR so 6695 // that the callee handling can skip over it easily. 6696 State.addLoc( 6697 FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, 6698 LocInfo) 6699 : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); 6700 break; 6701 } 6702 } 6703 6704 return false; 6705 } 6706 case MVT::v4f32: 6707 case MVT::v4i32: 6708 case MVT::v8i16: 6709 case MVT::v16i8: 6710 case MVT::v2i64: 6711 case MVT::v2f64: 6712 case MVT::v1i128: { 6713 const unsigned VecSize = 16; 6714 const Align VecAlign(VecSize); 6715 6716 if (!State.isVarArg()) { 6717 // If there are vector registers remaining we don't consume any stack 6718 // space. 
6719 if (unsigned VReg = State.AllocateReg(VR)) {
6720 State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6721 return false;
6722 }
6723 // Vectors passed on the stack do not shadow GPRs or FPRs even though they
6724 // might be allocated in the portion of the PSA that is shadowed by the
6725 // GPRs.
6726 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6727 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6728 return false;
6729 }
6730
6731 const unsigned PtrSize = IsPPC64 ? 8 : 4;
6732 ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;
6733
6734 unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
6735 // Burn any underaligned registers and their shadowed stack space until
6736 // we reach the required alignment.
6737 while (NextRegIndex != GPRs.size() &&
6738 !isGPRShadowAligned(GPRs[NextRegIndex], VecAlign)) {
6739 // Shadow allocate register and its stack shadow.
6740 unsigned Reg = State.AllocateReg(GPRs);
6741 State.AllocateStack(PtrSize, PtrAlign);
6742 assert(Reg && "Allocating register unexpectedly failed.");
6743 (void)Reg;
6744 NextRegIndex = State.getFirstUnallocated(GPRs);
6745 }
6746
6747 // Vectors that are passed as fixed arguments are handled differently.
6748 // They are passed in VRs if any are available (unlike arguments passed
6749 // through ellipses) and shadow GPRs (unlike arguments to non-vararg
6750 // functions).
6751 if (State.isFixed(ValNo)) {
6752 if (unsigned VReg = State.AllocateReg(VR)) {
6753 State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6754 // Shadow allocate GPRs and stack space even though we pass in a VR.
6755 for (unsigned I = 0; I != VecSize; I += PtrSize)
6756 State.AllocateReg(GPRs);
6757 State.AllocateStack(VecSize, VecAlign);
6758 return false;
6759 }
6760 // No vector registers remain, so pass on the stack.
6761 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6762 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6763 return false;
6764 }
6765
6766 // If all GPRs are consumed then we pass the argument fully on the stack.
6767 if (NextRegIndex == GPRs.size()) {
6768 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6769 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6770 return false;
6771 }
6772
6773 // Corner case for 32-bit codegen. We have 2 registers to pass the first
6774 // half of the argument, and then need to pass the remaining half on the
6775 // stack.
6776 if (GPRs[NextRegIndex] == PPC::R9) {
6777 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6778 State.addLoc(
6779 CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6780
6781 const unsigned FirstReg = State.AllocateReg(PPC::R9);
6782 const unsigned SecondReg = State.AllocateReg(PPC::R10);
6783 assert(FirstReg && SecondReg &&
6784 "Allocating R9 or R10 unexpectedly failed.");
6785 State.addLoc(
6786 CCValAssign::getCustomReg(ValNo, ValVT, FirstReg, RegVT, LocInfo));
6787 State.addLoc(
6788 CCValAssign::getCustomReg(ValNo, ValVT, SecondReg, RegVT, LocInfo));
6789 return false;
6790 }
6791
6792 // We have enough GPRs to fully pass the vector argument, and we have
6793 // already consumed any underaligned registers. Start with the custom
6794 // MemLoc and then the custom RegLocs.
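// Illustrative 32-bit example for the path below: a vararg vector whose
// next aligned GPR is R5 gets one custom MemLoc covering the 16-byte slot
// plus four custom RegLocs (R5, R6, R7, R8), each carrying one word of the
// vector.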
6795 const unsigned Offset = State.AllocateStack(VecSize, VecAlign); 6796 State.addLoc( 6797 CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo)); 6798 for (unsigned I = 0; I != VecSize; I += PtrSize) { 6799 const unsigned Reg = State.AllocateReg(GPRs); 6800 assert(Reg && "Failed to allocated register for vararg vector argument"); 6801 State.addLoc( 6802 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo)); 6803 } 6804 return false; 6805 } 6806 } 6807 return true; 6808 } 6809 6810 // So far, this function is only used by LowerFormalArguments_AIX() 6811 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT, 6812 bool IsPPC64, 6813 bool HasP8Vector, 6814 bool HasVSX) { 6815 assert((IsPPC64 || SVT != MVT::i64) && 6816 "i64 should have been split for 32-bit codegen."); 6817 6818 switch (SVT) { 6819 default: 6820 report_fatal_error("Unexpected value type for formal argument"); 6821 case MVT::i1: 6822 case MVT::i32: 6823 case MVT::i64: 6824 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 6825 case MVT::f32: 6826 return HasP8Vector ? &PPC::VSSRCRegClass : &PPC::F4RCRegClass; 6827 case MVT::f64: 6828 return HasVSX ? &PPC::VSFRCRegClass : &PPC::F8RCRegClass; 6829 case MVT::v4f32: 6830 case MVT::v4i32: 6831 case MVT::v8i16: 6832 case MVT::v16i8: 6833 case MVT::v2i64: 6834 case MVT::v2f64: 6835 case MVT::v1i128: 6836 return &PPC::VRRCRegClass; 6837 } 6838 } 6839 6840 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT, 6841 SelectionDAG &DAG, SDValue ArgValue, 6842 MVT LocVT, const SDLoc &dl) { 6843 assert(ValVT.isScalarInteger() && LocVT.isScalarInteger()); 6844 assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits()); 6845 6846 if (Flags.isSExt()) 6847 ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue, 6848 DAG.getValueType(ValVT)); 6849 else if (Flags.isZExt()) 6850 ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue, 6851 DAG.getValueType(ValVT)); 6852 6853 return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue); 6854 } 6855 6856 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) { 6857 const unsigned LASize = FL->getLinkageSize(); 6858 6859 if (PPC::GPRCRegClass.contains(Reg)) { 6860 assert(Reg >= PPC::R3 && Reg <= PPC::R10 && 6861 "Reg must be a valid argument register!"); 6862 return LASize + 4 * (Reg - PPC::R3); 6863 } 6864 6865 if (PPC::G8RCRegClass.contains(Reg)) { 6866 assert(Reg >= PPC::X3 && Reg <= PPC::X10 && 6867 "Reg must be a valid argument register!"); 6868 return LASize + 8 * (Reg - PPC::X3); 6869 } 6870 6871 llvm_unreachable("Only general purpose registers expected."); 6872 } 6873 6874 // AIX ABI Stack Frame Layout: 6875 // 6876 // Low Memory +--------------------------------------------+ 6877 // SP +---> | Back chain | ---+ 6878 // | +--------------------------------------------+ | 6879 // | | Saved Condition Register | | 6880 // | +--------------------------------------------+ | 6881 // | | Saved Linkage Register | | 6882 // | +--------------------------------------------+ | Linkage Area 6883 // | | Reserved for compilers | | 6884 // | +--------------------------------------------+ | 6885 // | | Reserved for binders | | 6886 // | +--------------------------------------------+ | 6887 // | | Saved TOC pointer | ---+ 6888 // | +--------------------------------------------+ 6889 // | | Parameter save area | 6890 // | +--------------------------------------------+ 6891 // | | Alloca space | 6892 // | +--------------------------------------------+ 6893 // | | 
Local variable space | 6894 // | +--------------------------------------------+ 6895 // | | Float/int conversion temporary | 6896 // | +--------------------------------------------+ 6897 // | | Save area for AltiVec registers | 6898 // | +--------------------------------------------+ 6899 // | | AltiVec alignment padding | 6900 // | +--------------------------------------------+ 6901 // | | Save area for VRSAVE register | 6902 // | +--------------------------------------------+ 6903 // | | Save area for General Purpose registers | 6904 // | +--------------------------------------------+ 6905 // | | Save area for Floating Point registers | 6906 // | +--------------------------------------------+ 6907 // +---- | Back chain | 6908 // High Memory +--------------------------------------------+ 6909 // 6910 // Specifications: 6911 // AIX 7.2 Assembler Language Reference 6912 // Subroutine linkage convention 6913 6914 SDValue PPCTargetLowering::LowerFormalArguments_AIX( 6915 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 6916 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 6917 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 6918 6919 assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold || 6920 CallConv == CallingConv::Fast) && 6921 "Unexpected calling convention!"); 6922 6923 if (getTargetMachine().Options.GuaranteedTailCallOpt) 6924 report_fatal_error("Tail call support is unimplemented on AIX."); 6925 6926 if (useSoftFloat()) 6927 report_fatal_error("Soft float support is unimplemented on AIX."); 6928 6929 const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>(); 6930 6931 const bool IsPPC64 = Subtarget.isPPC64(); 6932 const unsigned PtrByteSize = IsPPC64 ? 8 : 4; 6933 6934 // Assign locations to all of the incoming arguments. 6935 SmallVector<CCValAssign, 16> ArgLocs; 6936 MachineFunction &MF = DAG.getMachineFunction(); 6937 MachineFrameInfo &MFI = MF.getFrameInfo(); 6938 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 6939 AIXCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext()); 6940 6941 const EVT PtrVT = getPointerTy(MF.getDataLayout()); 6942 // Reserve space for the linkage area on the stack. 6943 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 6944 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize)); 6945 CCInfo.AnalyzeFormalArguments(Ins, CC_AIX); 6946 6947 SmallVector<SDValue, 8> MemOps; 6948 6949 for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) { 6950 CCValAssign &VA = ArgLocs[I++]; 6951 MVT LocVT = VA.getLocVT(); 6952 MVT ValVT = VA.getValVT(); 6953 ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags; 6954 // For compatibility with the AIX XL compiler, the float args in the 6955 // parameter save area are initialized even if the argument is available 6956 // in register. The caller is required to initialize both the register 6957 // and memory, however, the callee can choose to expect it in either. 6958 // The memloc is dismissed here because the argument is retrieved from 6959 // the register. 6960 if (VA.isMemLoc() && VA.needsCustom() && ValVT.isFloatingPoint()) 6961 continue; 6962 6963 auto HandleMemLoc = [&]() { 6964 const unsigned LocSize = LocVT.getStoreSize(); 6965 const unsigned ValSize = ValVT.getStoreSize(); 6966 assert((ValSize <= LocSize) && 6967 "Object size is larger than size of MemLoc"); 6968 int CurArgOffset = VA.getLocMemOffset(); 6969 // Objects are right-justified because AIX is big-endian. 
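// For example, an i32 passed in an 8-byte slot on 64-bit AIX is loaded
// from LocMemOffset + 4.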
6970 if (LocSize > ValSize)
6971 CurArgOffset += LocSize - ValSize;
6972 // Potential tail calls could cause overwriting of argument stack slots.
6973 const bool IsImmutable =
6974 !(getTargetMachine().Options.GuaranteedTailCallOpt &&
6975 (CallConv == CallingConv::Fast));
6976 int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
6977 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6978 SDValue ArgValue =
6979 DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
6980 InVals.push_back(ArgValue);
6981 };
6982
6983 // Vector arguments to vararg functions are passed both on the stack and
6984 // in any available GPRs. Load the value from the stack and add the GPRs
6985 // as live ins.
6986 if (VA.isMemLoc() && VA.needsCustom()) {
6987 assert(ValVT.isVector() && "Unexpected Custom MemLoc type.");
6988 assert(isVarArg && "Only use custom memloc for vararg.");
6989 // ValNo of the custom MemLoc, so we can compare it to the ValNo of the
6990 // matching custom RegLocs.
6991 const unsigned OriginalValNo = VA.getValNo();
6992 (void)OriginalValNo;
6993
6994 auto HandleCustomVecRegLoc = [&]() {
6995 assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
6996 "Missing custom RegLoc.");
6997 VA = ArgLocs[I++];
6998 assert(VA.getValVT().isVector() &&
6999 "Unexpected Val type for custom RegLoc.");
7000 assert(VA.getValNo() == OriginalValNo &&
7001 "ValNo mismatch between custom MemLoc and RegLoc.");
7002 MVT::SimpleValueType SVT = VA.getLocVT().SimpleTy;
7003 MF.addLiveIn(VA.getLocReg(),
7004 getRegClassForSVT(SVT, IsPPC64, Subtarget.hasP8Vector(),
7005 Subtarget.hasVSX()));
7006 };
7007
7008 HandleMemLoc();
7009 // In 64-bit there will be exactly 2 custom RegLocs that follow, and in
7010 // 32-bit there will be 2 custom RegLocs if we are passing in R9 and
7011 // R10.
7012 HandleCustomVecRegLoc();
7013 HandleCustomVecRegLoc();
7014
7015 // If we are targeting 32-bit, there might be 2 extra custom RegLocs if
7016 // we passed the vector in R5, R6, R7 and R8.
7017       if (I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom()) {
7018         assert(!IsPPC64 &&
7019                "Only 2 custom RegLocs expected for 64-bit codegen.");
7020         HandleCustomVecRegLoc();
7021         HandleCustomVecRegLoc();
7022       }
7023 
7024       continue;
7025     }
7026 
7027     if (VA.isRegLoc()) {
7028       if (VA.getValVT().isScalarInteger())
7029         FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
7030       else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector()) {
7031         switch (VA.getValVT().SimpleTy) {
7032         default:
7033           report_fatal_error("Unhandled value type for argument.");
7034         case MVT::f32:
7035           FuncInfo->appendParameterType(PPCFunctionInfo::ShortFloatingPoint);
7036           break;
7037         case MVT::f64:
7038           FuncInfo->appendParameterType(PPCFunctionInfo::LongFloatingPoint);
7039           break;
7040         }
7041       } else if (VA.getValVT().isVector()) {
7042         switch (VA.getValVT().SimpleTy) {
7043         default:
7044           report_fatal_error("Unhandled value type for argument.");
7045         case MVT::v16i8:
7046           FuncInfo->appendParameterType(PPCFunctionInfo::VectorChar);
7047           break;
7048         case MVT::v8i16:
7049           FuncInfo->appendParameterType(PPCFunctionInfo::VectorShort);
7050           break;
7051         case MVT::v4i32:
7052         case MVT::v2i64:
7053         case MVT::v1i128:
7054           FuncInfo->appendParameterType(PPCFunctionInfo::VectorInt);
7055           break;
7056         case MVT::v4f32:
7057         case MVT::v2f64:
7058           FuncInfo->appendParameterType(PPCFunctionInfo::VectorFloat);
7059           break;
7060         }
7061       }
7062     }
7063 
7064     if (Flags.isByVal() && VA.isMemLoc()) {
7065       const unsigned Size =
7066           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
7067                   PtrByteSize);
7068       const int FI = MF.getFrameInfo().CreateFixedObject(
7069           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
7070           /* IsAliased */ true);
7071       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7072       InVals.push_back(FIN);
7073 
7074       continue;
7075     }
7076 
7077     if (Flags.isByVal()) {
7078       assert(VA.isRegLoc() && "MemLocs should already be handled.");
7079 
7080       const MCPhysReg ArgReg = VA.getLocReg();
7081       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7082 
7083       if (Flags.getNonZeroByValAlign() > PtrByteSize)
7084         report_fatal_error("Over aligned byvals not supported yet.");
7085 
7086       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7087       const int FI = MF.getFrameInfo().CreateFixedObject(
7088           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
7089           /* IsAliased */ true);
7090       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7091       InVals.push_back(FIN);
7092 
7093       // Add live ins for all the RegLocs for the same ByVal.
7094       const TargetRegisterClass *RegClass =
7095           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7096 
7097       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
7098                                                unsigned Offset) {
7099         const Register VReg = MF.addLiveIn(PhysReg, RegClass);
7100         // Since the caller's side has left-justified the aggregate in the
7101         // register, we can simply store the entire register into the stack
7102         // slot.
7103         SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7104         // The store to the fixedstack object is needed because accessing a
7105         // field of the ByVal will use a gep and load. Ideally we will optimize
7106         // to extracting the value from the register directly, and elide the
7107         // stores when the argument's address is not taken, but that will need
7108         // to be future work.
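        // A minimal sketch of the IR this covers (all names and types here
        // are hypothetical):
        //
        //   %struct.S = type { i32, i32 }
        //   define i32 @use(%struct.S* byval(%struct.S) %s) {
        //     %p = getelementptr %struct.S, %struct.S* %s, i32 0, i32 1
        //     %b = load i32, i32* %p
        //     ret i32 %b
        //   }
        //
        // %s arrives in GPRs, but the load of %b addresses the frame object,
        // so the register contents must first be made visible in memory.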
7109         SDValue Store = DAG.getStore(
7110             CopyFrom.getValue(1), dl, CopyFrom,
7111             DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
7112             MachinePointerInfo::getFixedStack(MF, FI, Offset));
7113 
7114         MemOps.push_back(Store);
7115       };
7116 
7117       unsigned Offset = 0;
7118       HandleRegLoc(VA.getLocReg(), Offset);
7119       Offset += PtrByteSize;
7120       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7121            Offset += PtrByteSize) {
7122         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7123                "RegLocs should be for ByVal argument.");
7124 
7125         const CCValAssign RL = ArgLocs[I++];
7126         HandleRegLoc(RL.getLocReg(), Offset);
7127         FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
7128       }
7129 
7130       if (Offset != StackSize) {
7131         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7132                "Expected MemLoc for remaining bytes.");
7133         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
7134         // Consume the MemLoc. The InVal has already been emitted, so nothing
7135         // more needs to be done.
7136         ++I;
7137       }
7138 
7139       continue;
7140     }
7141 
7142     if (VA.isRegLoc() && !VA.needsCustom()) {
7143       MVT::SimpleValueType SVT = ValVT.SimpleTy;
7144       Register VReg =
7145           MF.addLiveIn(VA.getLocReg(),
7146                        getRegClassForSVT(SVT, IsPPC64, Subtarget.hasP8Vector(),
7147                                          Subtarget.hasVSX()));
7148       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7149       if (ValVT.isScalarInteger() &&
7150           (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
7151         ArgValue =
7152             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7153       }
7154       InVals.push_back(ArgValue);
7155       continue;
7156     }
7157     if (VA.isMemLoc()) {
7158       HandleMemLoc();
7159       continue;
7160     }
7161   }
7162 
7163   // On AIX a minimum of 8 words is saved to the parameter save area.
7164   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7165   // Area that is at least reserved in the caller of this function.
7166   unsigned CallerReservedArea =
7167       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
7168 
7169   // Set the size that is at least reserved in the caller of this function.
7170   // Tail call optimized function's reserved stack space needs to be aligned
7171   // so that taking the difference between two stack areas will result in an
7172   // aligned stack.
7173   CallerReservedArea =
7174       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7175   FuncInfo->setMinReservedArea(CallerReservedArea);
7176 
7177   if (isVarArg) {
7178     FuncInfo->setVarArgsFrameIndex(
7179         MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
7180     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7181 
7182     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7183                                        PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7184 
7185     static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7186                                        PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7187     const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
7188 
7189     // The fixed integer arguments of a variadic function are stored to the
7190     // VarArgsFrameIndex on the stack so that they may be loaded by
7191     // dereferencing the result of va_next.
7192     for (unsigned GPRIndex =
7193              (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
7194          GPRIndex < NumGPArgRegs; ++GPRIndex) {
7195 
7196       const Register VReg =
7197           IsPPC64 ?
MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass) 7198 : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass); 7199 7200 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 7201 SDValue Store = 7202 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 7203 MemOps.push_back(Store); 7204 // Increment the address for the next argument to store. 7205 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 7206 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 7207 } 7208 } 7209 7210 if (!MemOps.empty()) 7211 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 7212 7213 return Chain; 7214 } 7215 7216 SDValue PPCTargetLowering::LowerCall_AIX( 7217 SDValue Chain, SDValue Callee, CallFlags CFlags, 7218 const SmallVectorImpl<ISD::OutputArg> &Outs, 7219 const SmallVectorImpl<SDValue> &OutVals, 7220 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 7221 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, 7222 const CallBase *CB) const { 7223 // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the 7224 // AIX ABI stack frame layout. 7225 7226 assert((CFlags.CallConv == CallingConv::C || 7227 CFlags.CallConv == CallingConv::Cold || 7228 CFlags.CallConv == CallingConv::Fast) && 7229 "Unexpected calling convention!"); 7230 7231 if (CFlags.IsPatchPoint) 7232 report_fatal_error("This call type is unimplemented on AIX."); 7233 7234 const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>(); 7235 7236 MachineFunction &MF = DAG.getMachineFunction(); 7237 SmallVector<CCValAssign, 16> ArgLocs; 7238 AIXCCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs, 7239 *DAG.getContext()); 7240 7241 // Reserve space for the linkage save area (LSA) on the stack. 7242 // In both PPC32 and PPC64 there are 6 reserved slots in the LSA: 7243 // [SP][CR][LR][2 x reserved][TOC]. 7244 // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64. 7245 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 7246 const bool IsPPC64 = Subtarget.isPPC64(); 7247 const EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7248 const unsigned PtrByteSize = IsPPC64 ? 8 : 4; 7249 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize)); 7250 CCInfo.AnalyzeCallOperands(Outs, CC_AIX); 7251 7252 // The prolog code of the callee may store up to 8 GPR argument registers to 7253 // the stack, allowing va_start to index over them in memory if the callee 7254 // is variadic. 7255 // Because we cannot tell if this is needed on the caller side, we have to 7256 // conservatively assume that it is needed. As such, make sure we have at 7257 // least enough stack space for the caller to store the 8 GPRs. 7258 const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize; 7259 const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize, 7260 CCInfo.getNextStackOffset()); 7261 7262 // Adjust the stack pointer for the new arguments... 7263 // These operations are automatically eliminated by the prolog/epilog pass. 7264 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 7265 SDValue CallSeqStart = Chain; 7266 7267 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 7268 SmallVector<SDValue, 8> MemOpChains; 7269 7270 // Set up a copy of the stack pointer for loading and storing any 7271 // arguments that may not fit in the registers available for argument 7272 // passing. 7273 const SDValue StackPtr = IsPPC64 ? 
DAG.getRegister(PPC::X1, MVT::i64)
7274                                   : DAG.getRegister(PPC::R1, MVT::i32);
7275 
7276   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7277     const unsigned ValNo = ArgLocs[I].getValNo();
7278     SDValue Arg = OutVals[ValNo];
7279     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7280 
7281     if (Flags.isByVal()) {
7282       const unsigned ByValSize = Flags.getByValSize();
7283 
7284       // Nothing to do for zero-sized ByVals on the caller side.
7285       if (!ByValSize) {
7286         ++I;
7287         continue;
7288       }
7289 
7290       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7291         return DAG.getExtLoad(
7292             ISD::ZEXTLOAD, dl, PtrVT, Chain,
7293             (LoadOffset != 0)
7294                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7295                 : Arg,
7296             MachinePointerInfo(), VT);
7297       };
7298 
7299       unsigned LoadOffset = 0;
7300 
7301       // Initialize registers, which are fully occupied by the by-val argument.
7302       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7303         SDValue Load = GetLoad(PtrVT, LoadOffset);
7304         MemOpChains.push_back(Load.getValue(1));
7305         LoadOffset += PtrByteSize;
7306         const CCValAssign &ByValVA = ArgLocs[I++];
7307         assert(ByValVA.getValNo() == ValNo &&
7308                "Unexpected location for pass-by-value argument.");
7309         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7310       }
7311 
7312       if (LoadOffset == ByValSize)
7313         continue;
7314 
7315       // There must be one more loc to handle the remainder.
7316       assert(ArgLocs[I].getValNo() == ValNo &&
7317              "Expected additional location for by-value argument.");
7318 
7319       if (ArgLocs[I].isMemLoc()) {
7320         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7321         const CCValAssign &ByValVA = ArgLocs[I++];
7322         ISD::ArgFlagsTy MemcpyFlags = Flags;
7323         // Only memcpy the bytes that aren't passed in registers.
7324         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7325         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7326             (LoadOffset != 0)
7327                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7328                 : Arg,
7329             DAG.getObjectPtrOffset(dl, StackPtr,
7330                                    TypeSize::Fixed(ByValVA.getLocMemOffset())),
7331             CallSeqStart, MemcpyFlags, DAG, dl);
7332         continue;
7333       }
7334 
7335       // Initialize the final register residue.
7336       // Any residue that occupies the final by-val arg register must be
7337       // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7338       // larger than the ByValSize. For example: a 7-byte by-val arg requires
7339       // 4, 2 and 1 byte loads.
7340       const unsigned ResidueBytes = ByValSize % PtrByteSize;
7341       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7342              "Unexpected register residue for by-value argument.");
7343       SDValue ResidueVal;
7344       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7345         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7346         const MVT VT =
7347             N == 1 ? MVT::i8
7348                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7349         SDValue Load = GetLoad(VT, LoadOffset);
7350         MemOpChains.push_back(Load.getValue(1));
7351         LoadOffset += N;
7352         Bytes += N;
7353 
7354         // By-val arguments are passed left-justified in a register.
7355         // Every load here needs to be shifted, otherwise a full register load
7356         // should have been used.
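        // Worked example (a sketch, 64-bit, ResidueBytes == 7): the loop
        // emits an i32, an i16 and an i8 zero-extending load, shifted left
        // by 64 - 8*4 = 32, 64 - 8*6 = 16 and 64 - 8*7 = 8 bits
        // respectively; OR-ing the shifted values left-justifies all seven
        // bytes in the GPR and leaves its lowest byte zero.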
7357         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7358                "Unexpected load emitted during handling of pass-by-value "
7359                "argument.");
7360         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7361         EVT ShiftAmountTy =
7362             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7363         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7364         SDValue ShiftedLoad =
7365             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7366         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7367                                               ShiftedLoad)
7368                                 : ShiftedLoad;
7369       }
7370 
7371       const CCValAssign &ByValVA = ArgLocs[I++];
7372       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7373       continue;
7374     }
7375 
7376     CCValAssign &VA = ArgLocs[I++];
7377     const MVT LocVT = VA.getLocVT();
7378     const MVT ValVT = VA.getValVT();
7379 
7380     switch (VA.getLocInfo()) {
7381     default:
7382       report_fatal_error("Unexpected argument extension type.");
7383     case CCValAssign::Full:
7384       break;
7385     case CCValAssign::ZExt:
7386       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7387       break;
7388     case CCValAssign::SExt:
7389       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7390       break;
7391     }
7392 
7393     if (VA.isRegLoc() && !VA.needsCustom()) {
7394       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7395       continue;
7396     }
7397 
7398     // Vector arguments passed to VarArg functions need custom handling when
7399     // they are passed (at least partially) in GPRs.
7400     if (VA.isMemLoc() && VA.needsCustom() && ValVT.isVector()) {
7401       assert(CFlags.IsVarArg && "Custom MemLocs only used for Vector args.");
7402       // Store the value to its stack slot.
7403       SDValue PtrOff =
7404           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7405       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7406       SDValue Store =
7407           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
7408       MemOpChains.push_back(Store);
7409       const unsigned OriginalValNo = VA.getValNo();
7410       // Then load the GPRs from the stack.
7411       unsigned LoadOffset = 0;
7412       auto HandleCustomVecRegLoc = [&]() {
7413         assert(I != E && "Unexpected end of CCValAssigns.");
7414         assert(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
7415                "Expected custom RegLoc.");
7416         CCValAssign RegVA = ArgLocs[I++];
7417         assert(RegVA.getValNo() == OriginalValNo &&
7418                "Custom MemLoc ValNo and custom RegLoc ValNo must match.");
7419         SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
7420                                   DAG.getConstant(LoadOffset, dl, PtrVT));
7421         SDValue Load = DAG.getLoad(PtrVT, dl, Store, Add, MachinePointerInfo());
7422         MemOpChains.push_back(Load.getValue(1));
7423         RegsToPass.push_back(std::make_pair(RegVA.getLocReg(), Load));
7424         LoadOffset += PtrByteSize;
7425       };
7426 
7427       // In 64-bit there will be exactly 2 custom RegLocs that follow, and
7428       // in 32-bit there will be 2 custom RegLocs if we are passing in R9 and
7429       // R10.
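      // A sketch of the nodes built for one 64-bit vararg vector (offsets
      // assumed): the value is stored once to its parameter save area slot,
      // then each GPR half is re-loaded from that slot:
      //   ch = store<16 B> %vec, PSA+off
      //   t0: i64 = load PSA+off      ; -> first custom RegLoc's GPR
      //   t1: i64 = load PSA+off+8    ; -> second custom RegLoc's GPR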
7430       HandleCustomVecRegLoc();
7431       HandleCustomVecRegLoc();
7432 
7433       if (I != E && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
7434           ArgLocs[I].getValNo() == OriginalValNo) {
7435         assert(!IsPPC64 &&
7436                "Only 2 custom RegLocs expected for 64-bit codegen.");
7437         HandleCustomVecRegLoc();
7438         HandleCustomVecRegLoc();
7439       }
7440 
7441       continue;
7442     }
7443 
7444     if (VA.isMemLoc()) {
7445       SDValue PtrOff =
7446           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7447       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7448       MemOpChains.push_back(
7449           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7450 
7451       continue;
7452     }
7453 
7454     if (!ValVT.isFloatingPoint())
7455       report_fatal_error(
7456           "Unexpected register handling for calling convention.");
7457 
7458     // Custom handling is used for GPR initializations for vararg float
7459     // arguments.
7460     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7461            LocVT.isInteger() &&
7462            "Custom register handling only expected for VarArg.");
7463 
7464     SDValue ArgAsInt =
7465         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7466 
7467     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7468       // f32 in 32-bit GPR
7469       // f64 in 64-bit GPR
7470       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7471     else if (Arg.getValueType().getFixedSizeInBits() <
7472              LocVT.getFixedSizeInBits())
7473       // f32 in 64-bit GPR.
7474       RegsToPass.push_back(std::make_pair(
7475           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7476     else {
7477       // f64 in two 32-bit GPRs
7478       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7479       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7480              "Unexpected custom register for argument!");
7481       CCValAssign &GPR1 = VA;
7482       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7483                                      DAG.getConstant(32, dl, MVT::i8));
7484       RegsToPass.push_back(std::make_pair(
7485           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7486 
7487       if (I != E) {
7488         // If only 1 GPR was available, there will only be one custom GPR and
7489         // the argument will also be passed in memory.
7490         CCValAssign &PeekArg = ArgLocs[I];
7491         if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
7492           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7493           CCValAssign &GPR2 = ArgLocs[I++];
7494           RegsToPass.push_back(std::make_pair(
7495               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7496         }
7497       }
7498     }
7499   }
7500 
7501   if (!MemOpChains.empty())
7502     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7503 
7504   // For indirect calls, we need to save the TOC base to the stack for
7505   // restoration after the call.
7506   if (CFlags.IsIndirect) {
7507     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7508     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7509     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7510     const MVT PtrVT = Subtarget.isPPC64() ?
MVT::i64 : MVT::i32; 7511 const unsigned TOCSaveOffset = 7512 Subtarget.getFrameLowering()->getTOCSaveOffset(); 7513 7514 setUsesTOCBasePtr(DAG); 7515 SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT); 7516 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 7517 SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT); 7518 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 7519 Chain = DAG.getStore( 7520 Val.getValue(1), dl, Val, AddPtr, 7521 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset)); 7522 } 7523 7524 // Build a sequence of copy-to-reg nodes chained together with token chain 7525 // and flag operands which copy the outgoing args into the appropriate regs. 7526 SDValue InFlag; 7527 for (auto Reg : RegsToPass) { 7528 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag); 7529 InFlag = Chain.getValue(1); 7530 } 7531 7532 const int SPDiff = 0; 7533 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart, 7534 Callee, SPDiff, NumBytes, Ins, InVals, CB); 7535 } 7536 7537 bool 7538 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 7539 MachineFunction &MF, bool isVarArg, 7540 const SmallVectorImpl<ISD::OutputArg> &Outs, 7541 LLVMContext &Context) const { 7542 SmallVector<CCValAssign, 16> RVLocs; 7543 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 7544 return CCInfo.CheckReturn( 7545 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 7546 ? RetCC_PPC_Cold 7547 : RetCC_PPC); 7548 } 7549 7550 SDValue 7551 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 7552 bool isVarArg, 7553 const SmallVectorImpl<ISD::OutputArg> &Outs, 7554 const SmallVectorImpl<SDValue> &OutVals, 7555 const SDLoc &dl, SelectionDAG &DAG) const { 7556 SmallVector<CCValAssign, 16> RVLocs; 7557 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 7558 *DAG.getContext()); 7559 CCInfo.AnalyzeReturn(Outs, 7560 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 7561 ? RetCC_PPC_Cold 7562 : RetCC_PPC); 7563 7564 SDValue Flag; 7565 SmallVector<SDValue, 4> RetOps(1, Chain); 7566 7567 // Copy the result values into the output registers. 7568 for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) { 7569 CCValAssign &VA = RVLocs[i]; 7570 assert(VA.isRegLoc() && "Can only return in registers!"); 7571 7572 SDValue Arg = OutVals[RealResIdx]; 7573 7574 switch (VA.getLocInfo()) { 7575 default: llvm_unreachable("Unknown loc info!"); 7576 case CCValAssign::Full: break; 7577 case CCValAssign::AExt: 7578 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 7579 break; 7580 case CCValAssign::ZExt: 7581 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 7582 break; 7583 case CCValAssign::SExt: 7584 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 7585 break; 7586 } 7587 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) { 7588 bool isLittleEndian = Subtarget.isLittleEndian(); 7589 // Legalize ret f64 -> ret 2 x i32. 7590 SDValue SVal = 7591 DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 7592 DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl)); 7593 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag); 7594 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 7595 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 7596 DAG.getIntPtrConstant(isLittleEndian ? 
1 : 0, dl));
7597       Flag = Chain.getValue(1);
7598       VA = RVLocs[++i]; // skip ahead to next loc
7599       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7600     } else
7601       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7602     Flag = Chain.getValue(1);
7603     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7604   }
7605 
7606   RetOps[0] = Chain; // Update chain.
7607 
7608   // Add the flag if we have it.
7609   if (Flag.getNode())
7610     RetOps.push_back(Flag);
7611 
7612   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7613 }
7614 
7615 SDValue
7616 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7617                                                 SelectionDAG &DAG) const {
7618   SDLoc dl(Op);
7619 
7620   // Get the correct type for integers.
7621   EVT IntVT = Op.getValueType();
7622 
7623   // Get the inputs.
7624   SDValue Chain = Op.getOperand(0);
7625   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7626   // Build a DYNAREAOFFSET node.
7627   SDValue Ops[2] = {Chain, FPSIdx};
7628   SDVTList VTs = DAG.getVTList(IntVT);
7629   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7630 }
7631 
7632 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7633                                              SelectionDAG &DAG) const {
7634   // When we pop the dynamic allocation we need to restore the SP link.
7635   SDLoc dl(Op);
7636 
7637   // Get the correct type for pointers.
7638   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7639 
7640   // Construct the stack pointer operand.
7641   bool isPPC64 = Subtarget.isPPC64();
7642   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7643   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7644 
7645   // Get the operands for the STACKRESTORE.
7646   SDValue Chain = Op.getOperand(0);
7647   SDValue SaveSP = Op.getOperand(1);
7648 
7649   // Load the old link SP.
7650   SDValue LoadLinkSP =
7651       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7652 
7653   // Restore the stack pointer.
7654   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7655 
7656   // Store the old link SP.
7657   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7658 }
7659 
7660 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7661   MachineFunction &MF = DAG.getMachineFunction();
7662   bool isPPC64 = Subtarget.isPPC64();
7663   EVT PtrVT = getPointerTy(MF.getDataLayout());
7664 
7665   // Get the current return address save index; it addresses the slot in
7666   // which the link register is saved.
7667   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7668   int RASI = FI->getReturnAddrSaveIndex();
7669 
7670   // If the return address save index hasn't been defined yet.
7671   if (!RASI) {
7672     // Find out the fixed offset of the return address save area.
7673     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7674     // Allocate the frame index for the return address save area.
7675     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7676     // Save the result.
7677     FI->setReturnAddrSaveIndex(RASI);
7678   }
7679   return DAG.getFrameIndex(RASI, PtrVT);
7680 }
7681 
7682 SDValue
7683 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7684   MachineFunction &MF = DAG.getMachineFunction();
7685   bool isPPC64 = Subtarget.isPPC64();
7686   EVT PtrVT = getPointerTy(MF.getDataLayout());
7687 
7688   // Get the current frame pointer save index. The users of this index will be
7689   // primarily DYNALLOC instructions.
7690   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7691   int FPSI = FI->getFramePointerSaveIndex();
7692 
7693   // If the frame pointer save index hasn't been defined yet.
7694   if (!FPSI) {
7695     // Find out the fixed offset of the frame pointer save area.
7696     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7697     // Allocate the frame index for the frame pointer save area.
7698     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7699     // Save the result.
7700     FI->setFramePointerSaveIndex(FPSI);
7701   }
7702   return DAG.getFrameIndex(FPSI, PtrVT);
7703 }
7704 
7705 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7706                                                    SelectionDAG &DAG) const {
7707   MachineFunction &MF = DAG.getMachineFunction();
7708   // Get the inputs.
7709   SDValue Chain = Op.getOperand(0);
7710   SDValue Size = Op.getOperand(1);
7711   SDLoc dl(Op);
7712 
7713   // Get the correct type for pointers.
7714   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7715   // Negate the size.
7716   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7717                                 DAG.getConstant(0, dl, PtrVT), Size);
7718   // Construct a node for the frame pointer save index.
7719   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7720   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7721   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7722   if (hasInlineStackProbe(MF))
7723     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7724   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7725 }
7726 
7727 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7728                                              SelectionDAG &DAG) const {
7729   MachineFunction &MF = DAG.getMachineFunction();
7730 
7731   bool isPPC64 = Subtarget.isPPC64();
7732   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7733 
7734   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7735   return DAG.getFrameIndex(FI, PtrVT);
7736 }
7737 
7738 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7739                                                SelectionDAG &DAG) const {
7740   SDLoc DL(Op);
7741   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7742                      DAG.getVTList(MVT::i32, MVT::Other),
7743                      Op.getOperand(0), Op.getOperand(1));
7744 }
7745 
7746 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7747                                                 SelectionDAG &DAG) const {
7748   SDLoc DL(Op);
7749   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7750                      Op.getOperand(0), Op.getOperand(1));
7751 }
7752 
7753 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7754   if (Op.getValueType().isVector())
7755     return LowerVectorLoad(Op, DAG);
7756 
7757   assert(Op.getValueType() == MVT::i1 &&
7758          "Custom lowering only for i1 loads");
7759 
7760   // First, load 8 bits into a pointer-width integer, then truncate to 1 bit.
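  // A sketch of the replacement nodes (N is the pointer width in bits):
  //   t0: iN,ch = extload<(load i8)> chain, basePtr
  //   t1: i1    = truncate t0
  // t1 and t0's output chain are then merged and returned below.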
7761 7762 SDLoc dl(Op); 7763 LoadSDNode *LD = cast<LoadSDNode>(Op); 7764 7765 SDValue Chain = LD->getChain(); 7766 SDValue BasePtr = LD->getBasePtr(); 7767 MachineMemOperand *MMO = LD->getMemOperand(); 7768 7769 SDValue NewLD = 7770 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 7771 BasePtr, MVT::i8, MMO); 7772 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 7773 7774 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 7775 return DAG.getMergeValues(Ops, dl); 7776 } 7777 7778 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 7779 if (Op.getOperand(1).getValueType().isVector()) 7780 return LowerVectorStore(Op, DAG); 7781 7782 assert(Op.getOperand(1).getValueType() == MVT::i1 && 7783 "Custom lowering only for i1 stores"); 7784 7785 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 7786 7787 SDLoc dl(Op); 7788 StoreSDNode *ST = cast<StoreSDNode>(Op); 7789 7790 SDValue Chain = ST->getChain(); 7791 SDValue BasePtr = ST->getBasePtr(); 7792 SDValue Value = ST->getValue(); 7793 MachineMemOperand *MMO = ST->getMemOperand(); 7794 7795 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 7796 Value); 7797 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 7798 } 7799 7800 // FIXME: Remove this once the ANDI glue bug is fixed: 7801 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 7802 assert(Op.getValueType() == MVT::i1 && 7803 "Custom lowering only for i1 results"); 7804 7805 SDLoc DL(Op); 7806 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0)); 7807 } 7808 7809 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op, 7810 SelectionDAG &DAG) const { 7811 7812 // Implements a vector truncate that fits in a vector register as a shuffle. 7813 // We want to legalize vector truncates down to where the source fits in 7814 // a vector register (and target is therefore smaller than vector register 7815 // size). At that point legalization will try to custom lower the sub-legal 7816 // result and get here - where we can contain the truncate as a single target 7817 // operation. 
7818 
7819   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7820   // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7821   //
7822   // We will implement it for big-endian ordering as this (where u denotes
7823   // undefined):
7824   // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7825   // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7826   //
7827   // The same operation in little-endian ordering will be:
7828   // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7829   // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7830 
7831   EVT TrgVT = Op.getValueType();
7832   assert(TrgVT.isVector() && "Vector type expected.");
7833   unsigned TrgNumElts = TrgVT.getVectorNumElements();
7834   EVT EltVT = TrgVT.getVectorElementType();
7835   if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7836       TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7837       !isPowerOf2_32(EltVT.getSizeInBits()))
7838     return SDValue();
7839 
7840   SDValue N1 = Op.getOperand(0);
7841   EVT SrcVT = N1.getValueType();
7842   unsigned SrcSize = SrcVT.getSizeInBits();
7843   if (SrcSize > 256 ||
7844       !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7845       !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7846     return SDValue();
7847   if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7848     return SDValue();
7849 
7850   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7851   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7852 
7853   SDLoc DL(Op);
7854   SDValue Op1, Op2;
7855   if (SrcSize == 256) {
7856     EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7857     EVT SplitVT =
7858         N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7859     unsigned SplitNumElts = SplitVT.getVectorNumElements();
7860     Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7861                       DAG.getConstant(0, DL, VecIdxTy));
7862     Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7863                       DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7864   } else {
7866     Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7867     Op2 = DAG.getUNDEF(WideVT);
7868   }
7869 
7870   // First list the elements we want to keep.
7871   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7872   SmallVector<int, 16> ShuffV;
7873   if (Subtarget.isLittleEndian())
7874     for (unsigned i = 0; i < TrgNumElts; ++i)
7875       ShuffV.push_back(i * SizeMult);
7876   else
7877     for (unsigned i = 1; i <= TrgNumElts; ++i)
7878       ShuffV.push_back(i * SizeMult - 1);
7879 
7880   // Populate the remaining elements with undefs.
7881   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7883     ShuffV.push_back(WideNumElts + 1);
7884 
7885   Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7886   Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7887   return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7888 }
7889 
7890 /// LowerSELECT_CC - Lower floating-point select_cc's into an fsel instruction
7891 /// when possible.
7892 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7893   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7894   EVT ResVT = Op.getValueType();
7895   EVT CmpVT = Op.getOperand(0).getValueType();
7896   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7897   SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
7898   SDLoc dl(Op);
7899 
7900   // Without power9-vector, we don't have a native instruction for f128 comparison.
7901   // The following transformation to a libcall is needed for setcc:
7902   // select_cc lhs, rhs, tv, fv, cc -> select_cc (setcc lhs, rhs, cc), 0, tv, fv, NE
7903   if (!Subtarget.hasP9Vector() && CmpVT == MVT::f128) {
7904     SDValue Z = DAG.getSetCC(
7905         dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT),
7906         LHS, RHS, CC);
7907     SDValue Zero = DAG.getConstant(0, dl, Z.getValueType());
7908     return DAG.getSelectCC(dl, Z, Zero, TV, FV, ISD::SETNE);
7909   }
7910 
7911   // Not FP, or using SPE? Not an fsel.
7912   if (!CmpVT.isFloatingPoint() || !TV.getValueType().isFloatingPoint() ||
7913       Subtarget.hasSPE())
7914     return Op;
7915 
7916   SDNodeFlags Flags = Op.getNode()->getFlags();
7917 
7918   // We have xsmaxc[dq]p/xsminc[dq]p which are OK to emit even in the
7919   // presence of infinities.
7920   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7921     switch (CC) {
7922     default:
7923       break;
7924     case ISD::SETOGT:
7925     case ISD::SETGT:
7926       return DAG.getNode(PPCISD::XSMAXC, dl, Op.getValueType(), LHS, RHS);
7927     case ISD::SETOLT:
7928     case ISD::SETLT:
7929       return DAG.getNode(PPCISD::XSMINC, dl, Op.getValueType(), LHS, RHS);
7930     }
7931   }
7932 
7933   // We might be able to do better than this under some circumstances, but in
7934   // general, fsel-based lowering of select is a finite-math-only optimization.
7935   // For more information, see section F.3 of the 2.06 ISA specification.
7936   // (With ISA 3.0, the xsmaxc/xsminc cases above avoid this restriction.)
7937   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7938       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7939     return Op;
7940 
7941   // If the RHS of the comparison is a 0.0, we don't need to do the
7942   // subtraction at all.
7943   SDValue Sel1;
7944   if (isFloatingPointZero(RHS))
7945     switch (CC) {
7946     default: break; // SETUO etc aren't handled by fsel.
7947     case ISD::SETNE:
7948       std::swap(TV, FV);
7949       LLVM_FALLTHROUGH;
7950     case ISD::SETEQ:
7951       if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7952         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7953       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7954       if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
7955         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7956       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7957                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7958     case ISD::SETULT:
7959     case ISD::SETLT:
7960       std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
7961       LLVM_FALLTHROUGH;
7962     case ISD::SETOGE:
7963     case ISD::SETGE:
7964       if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7965         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7966       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7967     case ISD::SETUGT:
7968     case ISD::SETGT:
7969       std::swap(TV, FV); // fsel is natively setge, swap operands for setgt
7970       LLVM_FALLTHROUGH;
7971     case ISD::SETOLE:
7972     case ISD::SETLE:
7973       if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7974         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7975       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7976                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7977     }
7978 
7979   SDValue Cmp;
7980   switch (CC) {
7981   default: break; // SETUO etc aren't handled by fsel.
7982 case ISD::SETNE: 7983 std::swap(TV, FV); 7984 LLVM_FALLTHROUGH; 7985 case ISD::SETEQ: 7986 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7987 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7988 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7989 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 7990 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 7991 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 7992 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 7993 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 7994 case ISD::SETULT: 7995 case ISD::SETLT: 7996 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 7997 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 7998 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 7999 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 8000 case ISD::SETOGE: 8001 case ISD::SETGE: 8002 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags); 8003 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 8004 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 8005 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 8006 case ISD::SETUGT: 8007 case ISD::SETGT: 8008 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 8009 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 8010 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 8011 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 8012 case ISD::SETOLE: 8013 case ISD::SETLE: 8014 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags); 8015 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 8016 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 8017 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 8018 } 8019 return Op; 8020 } 8021 8022 static unsigned getPPCStrictOpcode(unsigned Opc) { 8023 switch (Opc) { 8024 default: 8025 llvm_unreachable("No strict version of this opcode!"); 8026 case PPCISD::FCTIDZ: 8027 return PPCISD::STRICT_FCTIDZ; 8028 case PPCISD::FCTIWZ: 8029 return PPCISD::STRICT_FCTIWZ; 8030 case PPCISD::FCTIDUZ: 8031 return PPCISD::STRICT_FCTIDUZ; 8032 case PPCISD::FCTIWUZ: 8033 return PPCISD::STRICT_FCTIWUZ; 8034 case PPCISD::FCFID: 8035 return PPCISD::STRICT_FCFID; 8036 case PPCISD::FCFIDU: 8037 return PPCISD::STRICT_FCFIDU; 8038 case PPCISD::FCFIDS: 8039 return PPCISD::STRICT_FCFIDS; 8040 case PPCISD::FCFIDUS: 8041 return PPCISD::STRICT_FCFIDUS; 8042 } 8043 } 8044 8045 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG, 8046 const PPCSubtarget &Subtarget) { 8047 SDLoc dl(Op); 8048 bool IsStrict = Op->isStrictFPOpcode(); 8049 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT || 8050 Op.getOpcode() == ISD::STRICT_FP_TO_SINT; 8051 8052 // TODO: Any other flags to propagate? 8053 SDNodeFlags Flags; 8054 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 8055 8056 // For strict nodes, source is the second operand. 8057 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 8058 SDValue Chain = IsStrict ? 
Op.getOperand(0) : SDValue(); 8059 assert(Src.getValueType().isFloatingPoint()); 8060 if (Src.getValueType() == MVT::f32) { 8061 if (IsStrict) { 8062 Src = 8063 DAG.getNode(ISD::STRICT_FP_EXTEND, dl, 8064 DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags); 8065 Chain = Src.getValue(1); 8066 } else 8067 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 8068 } 8069 SDValue Conv; 8070 unsigned Opc = ISD::DELETED_NODE; 8071 switch (Op.getSimpleValueType().SimpleTy) { 8072 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 8073 case MVT::i32: 8074 Opc = IsSigned ? PPCISD::FCTIWZ 8075 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ); 8076 break; 8077 case MVT::i64: 8078 assert((IsSigned || Subtarget.hasFPCVT()) && 8079 "i64 FP_TO_UINT is supported only with FPCVT"); 8080 Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ; 8081 } 8082 if (IsStrict) { 8083 Opc = getPPCStrictOpcode(Opc); 8084 Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other), 8085 {Chain, Src}, Flags); 8086 } else { 8087 Conv = DAG.getNode(Opc, dl, MVT::f64, Src); 8088 } 8089 return Conv; 8090 } 8091 8092 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, 8093 SelectionDAG &DAG, 8094 const SDLoc &dl) const { 8095 SDValue Tmp = convertFPToInt(Op, DAG, Subtarget); 8096 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT || 8097 Op.getOpcode() == ISD::STRICT_FP_TO_SINT; 8098 bool IsStrict = Op->isStrictFPOpcode(); 8099 8100 // Convert the FP value to an int value through memory. 8101 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 8102 (IsSigned || Subtarget.hasFPCVT()); 8103 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 8104 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 8105 MachinePointerInfo MPI = 8106 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 8107 8108 // Emit a store to the stack slot. 8109 SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode(); 8110 Align Alignment(DAG.getEVTAlign(Tmp.getValueType())); 8111 if (i32Stack) { 8112 MachineFunction &MF = DAG.getMachineFunction(); 8113 Alignment = Align(4); 8114 MachineMemOperand *MMO = 8115 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment); 8116 SDValue Ops[] = { Chain, Tmp, FIPtr }; 8117 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 8118 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 8119 } else 8120 Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment); 8121 8122 // Result is a load from the stack slot. If loading 4 bytes, make sure to 8123 // add in a bias on big endian. 8124 if (Op.getValueType() == MVT::i32 && !i32Stack) { 8125 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 8126 DAG.getConstant(4, dl, FIPtr.getValueType())); 8127 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 8128 } 8129 8130 RLI.Chain = Chain; 8131 RLI.Ptr = FIPtr; 8132 RLI.MPI = MPI; 8133 RLI.Alignment = Alignment; 8134 } 8135 8136 /// Custom lowers floating point to integer conversions to use 8137 /// the direct move instructions available in ISA 2.07 to avoid the 8138 /// need for load/store combinations. 
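/// For example (a sketch, assuming direct-move support), an
/// (i32 (fp_to_sint f64 %x)) becomes
///   t0: f64 = PPCISD::FCTIWZ %x
///   t1: i32 = PPCISD::MFVSR t0
/// rather than a store/reload through a stack temporary.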
8139 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 8140 SelectionDAG &DAG, 8141 const SDLoc &dl) const { 8142 SDValue Conv = convertFPToInt(Op, DAG, Subtarget); 8143 SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv); 8144 if (Op->isStrictFPOpcode()) 8145 return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl); 8146 else 8147 return Mov; 8148 } 8149 8150 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 8151 const SDLoc &dl) const { 8152 bool IsStrict = Op->isStrictFPOpcode(); 8153 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT || 8154 Op.getOpcode() == ISD::STRICT_FP_TO_SINT; 8155 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 8156 EVT SrcVT = Src.getValueType(); 8157 EVT DstVT = Op.getValueType(); 8158 8159 // FP to INT conversions are legal for f128. 8160 if (SrcVT == MVT::f128) 8161 return Subtarget.hasP9Vector() ? Op : SDValue(); 8162 8163 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on 8164 // PPC (the libcall is not available). 8165 if (SrcVT == MVT::ppcf128) { 8166 if (DstVT == MVT::i32) { 8167 // TODO: Conservatively pass only nofpexcept flag here. Need to check and 8168 // set other fast-math flags to FP operations in both strict and 8169 // non-strict cases. (FP_TO_SINT, FSUB) 8170 SDNodeFlags Flags; 8171 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 8172 8173 if (IsSigned) { 8174 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src, 8175 DAG.getIntPtrConstant(0, dl)); 8176 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src, 8177 DAG.getIntPtrConstant(1, dl)); 8178 8179 // Add the two halves of the long double in round-to-zero mode, and use 8180 // a smaller FP_TO_SINT. 8181 if (IsStrict) { 8182 SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl, 8183 DAG.getVTList(MVT::f64, MVT::Other), 8184 {Op.getOperand(0), Lo, Hi}, Flags); 8185 return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, 8186 DAG.getVTList(MVT::i32, MVT::Other), 8187 {Res.getValue(1), Res}, Flags); 8188 } else { 8189 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 8190 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res); 8191 } 8192 } else { 8193 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0}; 8194 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31)); 8195 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT); 8196 SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT); 8197 if (IsStrict) { 8198 // Sel = Src < 0x80000000 8199 // FltOfs = select Sel, 0.0, 0x80000000 8200 // IntOfs = select Sel, 0, 0x80000000 8201 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs 8202 SDValue Chain = Op.getOperand(0); 8203 EVT SetCCVT = 8204 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT); 8205 EVT DstSetCCVT = 8206 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT); 8207 SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT, 8208 Chain, true); 8209 Chain = Sel.getValue(1); 8210 8211 SDValue FltOfs = DAG.getSelect( 8212 dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst); 8213 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 8214 8215 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, 8216 DAG.getVTList(SrcVT, MVT::Other), 8217 {Chain, Src, FltOfs}, Flags); 8218 Chain = Val.getValue(1); 8219 SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, 8220 DAG.getVTList(DstVT, MVT::Other), 8221 {Chain, Val}, Flags); 8222 Chain = SInt.getValue(1); 8223 SDValue IntOfs = DAG.getSelect( 8224 dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), 
SignMask);
8225         SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
8226         return DAG.getMergeValues({Result, Chain}, dl);
8227       } else {
8228         // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8229         // FIXME: generated code sucks.
8230         SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
8231         True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8232         True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
8233         SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
8234         return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
8235       }
8236     }
8237   }
8238 
8239     return SDValue();
8240   }
8241 
8242   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8243     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8244 
8245   ReuseLoadInfo RLI;
8246   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8247 
8248   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8249                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8250 }
8251 
8252 // We're trying to insert a regular store, S, and then a load, L. If the
8253 // incoming value, O, is a load, we might just be able to have our load use the
8254 // address used by O. However, we don't know if anything else will store to
8255 // that address before we can load from it. To prevent this situation, we need
8256 // to insert our load, L, into the chain as a peer of O. To do this, we give L
8257 // the same chain operand as O, we create a token factor from the chain results
8258 // of O and L, and we replace all uses of O's chain result with that token
8259 // factor (see spliceIntoChain below for this last part).
8260 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8261                                             ReuseLoadInfo &RLI,
8262                                             SelectionDAG &DAG,
8263                                             ISD::LoadExtType ET) const {
8264   // Conservatively skip reusing for constrained FP nodes.
8265   if (Op->isStrictFPOpcode())
8266     return false;
8267 
8268   SDLoc dl(Op);
8269   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8270                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8271   if (ET == ISD::NON_EXTLOAD &&
8272       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8273       isOperationLegalOrCustom(Op.getOpcode(),
8274                                Op.getOperand(0).getValueType())) {
8275 
8276     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8277     return true;
8278   }
8279 
8280   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8281   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8282       LD->isNonTemporal())
8283     return false;
8284   if (LD->getMemoryVT() != MemVT)
8285     return false;
8286 
8287   // If the result of the load is an illegal type, then we can't build a
8288   // valid chain for reuse since the legalised loads and token factor node that
8289   // ties the legalised loads together uses a different output chain than the
8290   // illegal load.
8291   if (!isTypeLegal(LD->getValueType(0)))
8292     return false;
8293 
8294   RLI.Ptr = LD->getBasePtr();
8295   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8296     assert(LD->getAddressingMode() == ISD::PRE_INC &&
8297            "Non-pre-inc AM on PPC?");
8298     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8299                           LD->getOffset());
8300   }
8301 
8302   RLI.Chain = LD->getChain();
8303   RLI.MPI = LD->getPointerInfo();
8304   RLI.IsDereferenceable = LD->isDereferenceable();
8305   RLI.IsInvariant = LD->isInvariant();
8306   RLI.Alignment = LD->getAlign();
8307   RLI.AAInfo = LD->getAAInfo();
8308   RLI.Ranges = LD->getRanges();
8309 
8310   RLI.ResChain = SDValue(LD, LD->isIndexed() ?
2 : 1); 8311 return true; 8312 } 8313 8314 // Given the head of the old chain, ResChain, insert a token factor containing 8315 // it and NewResChain, and make users of ResChain now be users of that token 8316 // factor. 8317 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead. 8318 void PPCTargetLowering::spliceIntoChain(SDValue ResChain, 8319 SDValue NewResChain, 8320 SelectionDAG &DAG) const { 8321 if (!ResChain) 8322 return; 8323 8324 SDLoc dl(NewResChain); 8325 8326 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 8327 NewResChain, DAG.getUNDEF(MVT::Other)); 8328 assert(TF.getNode() != NewResChain.getNode() && 8329 "A new TF really is required here"); 8330 8331 DAG.ReplaceAllUsesOfValueWith(ResChain, TF); 8332 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); 8333 } 8334 8335 /// Analyze profitability of direct move 8336 /// prefer float load to int load plus direct move 8337 /// when there is no integer use of int load 8338 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { 8339 SDNode *Origin = Op.getOperand(0).getNode(); 8340 if (Origin->getOpcode() != ISD::LOAD) 8341 return true; 8342 8343 // If there is no LXSIBZX/LXSIHZX, like Power8, 8344 // prefer direct move if the memory size is 1 or 2 bytes. 8345 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand(); 8346 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2) 8347 return true; 8348 8349 for (SDNode::use_iterator UI = Origin->use_begin(), 8350 UE = Origin->use_end(); 8351 UI != UE; ++UI) { 8352 8353 // Only look at the users of the loaded value. 8354 if (UI.getUse().get().getResNo() != 0) 8355 continue; 8356 8357 if (UI->getOpcode() != ISD::SINT_TO_FP && 8358 UI->getOpcode() != ISD::UINT_TO_FP && 8359 UI->getOpcode() != ISD::STRICT_SINT_TO_FP && 8360 UI->getOpcode() != ISD::STRICT_UINT_TO_FP) 8361 return true; 8362 } 8363 8364 return false; 8365 } 8366 8367 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG, 8368 const PPCSubtarget &Subtarget, 8369 SDValue Chain = SDValue()) { 8370 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP || 8371 Op.getOpcode() == ISD::STRICT_SINT_TO_FP; 8372 SDLoc dl(Op); 8373 8374 // TODO: Any other flags to propagate? 8375 SDNodeFlags Flags; 8376 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 8377 8378 // If we have FCFIDS, then use it when converting to single-precision. 8379 // Otherwise, convert to double-precision and then round. 8380 bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT(); 8381 unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS) 8382 : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU); 8383 EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64; 8384 if (Op->isStrictFPOpcode()) { 8385 if (!Chain) 8386 Chain = Op.getOperand(0); 8387 return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl, 8388 DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags); 8389 } else 8390 return DAG.getNode(ConvOpc, dl, ConvTy, Src); 8391 } 8392 8393 /// Custom lowers integer to floating point conversions to use 8394 /// the direct move instructions available in ISA 2.07 to avoid the 8395 /// need for load/store combinations. 
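/// For example (a sketch, assuming direct-move and FPCVT support), an
/// (f64 (sint_to_fp i32 %n)) becomes
///   t0: f64 = PPCISD::MTVSRA %n
///   t1: f64 = PPCISD::FCFID t0
/// instead of storing %n and reloading it as a floating-point value.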
8396 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 8397 SelectionDAG &DAG, 8398 const SDLoc &dl) const { 8399 assert((Op.getValueType() == MVT::f32 || 8400 Op.getValueType() == MVT::f64) && 8401 "Invalid floating point type as target of conversion"); 8402 assert(Subtarget.hasFPCVT() && 8403 "Int to FP conversions with direct moves require FPCVT"); 8404 SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0); 8405 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 8406 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP || 8407 Op.getOpcode() == ISD::STRICT_SINT_TO_FP; 8408 unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA; 8409 SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src); 8410 return convertIntToFP(Op, Mov, DAG, Subtarget); 8411 } 8412 8413 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) { 8414 8415 EVT VecVT = Vec.getValueType(); 8416 assert(VecVT.isVector() && "Expected a vector type."); 8417 assert(VecVT.getSizeInBits() < 128 && "Vector is already full width."); 8418 8419 EVT EltVT = VecVT.getVectorElementType(); 8420 unsigned WideNumElts = 128 / EltVT.getSizeInBits(); 8421 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts); 8422 8423 unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements(); 8424 SmallVector<SDValue, 16> Ops(NumConcat); 8425 Ops[0] = Vec; 8426 SDValue UndefVec = DAG.getUNDEF(VecVT); 8427 for (unsigned i = 1; i < NumConcat; ++i) 8428 Ops[i] = UndefVec; 8429 8430 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops); 8431 } 8432 8433 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG, 8434 const SDLoc &dl) const { 8435 bool IsStrict = Op->isStrictFPOpcode(); 8436 unsigned Opc = Op.getOpcode(); 8437 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 8438 assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP || 8439 Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) && 8440 "Unexpected conversion type"); 8441 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) && 8442 "Supports conversions to v2f64/v4f32 only."); 8443 8444 // TODO: Any other flags to propagate? 8445 SDNodeFlags Flags; 8446 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 8447 8448 bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP; 8449 bool FourEltRes = Op.getValueType() == MVT::v4f32; 8450 8451 SDValue Wide = widenVec(DAG, Src, dl); 8452 EVT WideVT = Wide.getValueType(); 8453 unsigned WideNumElts = WideVT.getVectorNumElements(); 8454 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64; 8455 8456 SmallVector<int, 16> ShuffV; 8457 for (unsigned i = 0; i < WideNumElts; ++i) 8458 ShuffV.push_back(i + WideNumElts); 8459 8460 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2; 8461 int SaveElts = FourEltRes ? 4 : 2; 8462 if (Subtarget.isLittleEndian()) 8463 for (int i = 0; i < SaveElts; i++) 8464 ShuffV[i * Stride] = i; 8465 else 8466 for (int i = 1; i <= SaveElts; i++) 8467 ShuffV[i * Stride - 1] = i - 1; 8468 8469 SDValue ShuffleSrc2 = 8470 SignedConv ? 
DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT); 8471 SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV); 8472 8473 SDValue Extend; 8474 if (SignedConv) { 8475 Arrange = DAG.getBitcast(IntermediateVT, Arrange); 8476 EVT ExtVT = Src.getValueType(); 8477 if (Subtarget.hasP9Altivec()) 8478 ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(), 8479 IntermediateVT.getVectorNumElements()); 8480 8481 Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange, 8482 DAG.getValueType(ExtVT)); 8483 } else 8484 Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange); 8485 8486 if (IsStrict) 8487 return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other), 8488 {Op.getOperand(0), Extend}, Flags); 8489 8490 return DAG.getNode(Opc, dl, Op.getValueType(), Extend); 8491 } 8492 8493 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 8494 SelectionDAG &DAG) const { 8495 SDLoc dl(Op); 8496 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP || 8497 Op.getOpcode() == ISD::STRICT_SINT_TO_FP; 8498 bool IsStrict = Op->isStrictFPOpcode(); 8499 SDValue Src = Op.getOperand(IsStrict ? 1 : 0); 8500 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode(); 8501 8502 // TODO: Any other flags to propagate? 8503 SDNodeFlags Flags; 8504 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept()); 8505 8506 EVT InVT = Src.getValueType(); 8507 EVT OutVT = Op.getValueType(); 8508 if (OutVT.isVector() && OutVT.isFloatingPoint() && 8509 isOperationCustom(Op.getOpcode(), InVT)) 8510 return LowerINT_TO_FPVector(Op, DAG, dl); 8511 8512 // Conversions to f128 are legal. 8513 if (Op.getValueType() == MVT::f128) 8514 return Subtarget.hasP9Vector() ? Op : SDValue(); 8515 8516 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 8517 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 8518 return SDValue(); 8519 8520 if (Src.getValueType() == MVT::i1) { 8521 SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src, 8522 DAG.getConstantFP(1.0, dl, Op.getValueType()), 8523 DAG.getConstantFP(0.0, dl, Op.getValueType())); 8524 if (IsStrict) 8525 return DAG.getMergeValues({Sel, Chain}, dl); 8526 else 8527 return Sel; 8528 } 8529 8530 // If we have direct moves, we can do all the conversion, skip the store/load 8531 // however, without FPCVT we can't do most conversions. 8532 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 8533 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 8534 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 8535 8536 assert((IsSigned || Subtarget.hasFPCVT()) && 8537 "UINT_TO_FP is supported only with FPCVT"); 8538 8539 if (Src.getValueType() == MVT::i64) { 8540 SDValue SINT = Src; 8541 // When converting to single-precision, we actually need to convert 8542 // to double-precision first and then round to single-precision. 8543 // To avoid double-rounding effects during that operation, we have 8544 // to prepare the input operand. Bits that might be truncated when 8545 // converting to double-precision are replaced by a bit that won't 8546 // be lost at this stage, but is below the single-precision rounding 8547 // position. 8548 // 8549 // However, if -enable-unsafe-fp-math is in effect, accept double 8550 // rounding to avoid the extra overhead. 8551 if (Op.getValueType() == MVT::f32 && 8552 !Subtarget.hasFPCVT() && 8553 !DAG.getTarget().Options.UnsafeFPMath) { 8554 8555 // Twiddle input to make sure the low 11 bits are zero. 
(If this 8556 // is the case, we are guaranteed the value will fit into the 53 bit 8557 // mantissa of an IEEE double-precision value without rounding.) 8558 // If any of those low 11 bits were not zero originally, make sure 8559 // bit 12 (value 2048) is set instead, so that the final rounding 8560 // to single-precision gets the correct result. 8561 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8562 SINT, DAG.getConstant(2047, dl, MVT::i64)); 8563 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 8564 Round, DAG.getConstant(2047, dl, MVT::i64)); 8565 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 8566 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8567 Round, DAG.getConstant(-2048, dl, MVT::i64)); 8568 8569 // However, we cannot use that value unconditionally: if the magnitude 8570 // of the input value is small, the bit-twiddling we did above might 8571 // end up visibly changing the output. Fortunately, in that case, we 8572 // don't need to twiddle bits since the original input will convert 8573 // exactly to double-precision floating-point already. Therefore, 8574 // construct a conditional to use the original value if the top 11 8575 // bits are all sign-bit copies, and use the rounded value computed 8576 // above otherwise. 8577 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 8578 SINT, DAG.getConstant(53, dl, MVT::i32)); 8579 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 8580 Cond, DAG.getConstant(1, dl, MVT::i64)); 8581 Cond = DAG.getSetCC( 8582 dl, 8583 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64), 8584 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 8585 8586 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 8587 } 8588 8589 ReuseLoadInfo RLI; 8590 SDValue Bits; 8591 8592 MachineFunction &MF = DAG.getMachineFunction(); 8593 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 8594 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 8595 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 8596 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8597 } else if (Subtarget.hasLFIWAX() && 8598 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 8599 MachineMemOperand *MMO = 8600 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8601 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8602 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8603 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 8604 DAG.getVTList(MVT::f64, MVT::Other), 8605 Ops, MVT::i32, MMO); 8606 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8607 } else if (Subtarget.hasFPCVT() && 8608 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 8609 MachineMemOperand *MMO = 8610 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8611 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8612 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8613 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 8614 DAG.getVTList(MVT::f64, MVT::Other), 8615 Ops, MVT::i32, MMO); 8616 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8617 } else if (((Subtarget.hasLFIWAX() && 8618 SINT.getOpcode() == ISD::SIGN_EXTEND) || 8619 (Subtarget.hasFPCVT() && 8620 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 8621 SINT.getOperand(0).getValueType() == MVT::i32) { 8622 MachineFrameInfo &MFI = MF.getFrameInfo(); 8623 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8624 8625 int FrameIdx = MFI.CreateStackObject(4, Align(4), false); 8626 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8627 8628 SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx, 8629 
MachinePointerInfo::getFixedStack( 8630 DAG.getMachineFunction(), FrameIdx)); 8631 Chain = Store; 8632 8633 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8634 "Expected an i32 store"); 8635 8636 RLI.Ptr = FIdx; 8637 RLI.Chain = Chain; 8638 RLI.MPI = 8639 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8640 RLI.Alignment = Align(4); 8641 8642 MachineMemOperand *MMO = 8643 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8644 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8645 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8646 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 8647 PPCISD::LFIWZX : PPCISD::LFIWAX, 8648 dl, DAG.getVTList(MVT::f64, MVT::Other), 8649 Ops, MVT::i32, MMO); 8650 Chain = Bits.getValue(1); 8651 } else 8652 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 8653 8654 SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain); 8655 if (IsStrict) 8656 Chain = FP.getValue(1); 8657 8658 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) { 8659 if (IsStrict) 8660 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl, 8661 DAG.getVTList(MVT::f32, MVT::Other), 8662 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags); 8663 else 8664 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, 8665 DAG.getIntPtrConstant(0, dl)); 8666 } 8667 return FP; 8668 } 8669 8670 assert(Src.getValueType() == MVT::i32 && 8671 "Unhandled INT_TO_FP type in custom expander!"); 8672 // Since we only generate this in 64-bit mode, we can take advantage of 8673 // 64-bit registers. In particular, sign extend the input value into the 8674 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 8675 // then lfd it and fcfid it. 8676 MachineFunction &MF = DAG.getMachineFunction(); 8677 MachineFrameInfo &MFI = MF.getFrameInfo(); 8678 EVT PtrVT = getPointerTy(MF.getDataLayout()); 8679 8680 SDValue Ld; 8681 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 8682 ReuseLoadInfo RLI; 8683 bool ReusingLoad; 8684 if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) { 8685 int FrameIdx = MFI.CreateStackObject(4, Align(4), false); 8686 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8687 8688 SDValue Store = DAG.getStore(Chain, dl, Src, FIdx, 8689 MachinePointerInfo::getFixedStack( 8690 DAG.getMachineFunction(), FrameIdx)); 8691 Chain = Store; 8692 8693 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8694 "Expected an i32 store"); 8695 8696 RLI.Ptr = FIdx; 8697 RLI.Chain = Chain; 8698 RLI.MPI = 8699 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8700 RLI.Alignment = Align(4); 8701 } 8702 8703 MachineMemOperand *MMO = 8704 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8705 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8706 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8707 Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl, 8708 DAG.getVTList(MVT::f64, MVT::Other), Ops, 8709 MVT::i32, MMO); 8710 Chain = Ld.getValue(1); 8711 if (ReusingLoad) 8712 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG); 8713 } else { 8714 assert(Subtarget.isPPC64() && 8715 "i32->FP without LFIWAX supported only on PPC64"); 8716 8717 int FrameIdx = MFI.CreateStackObject(8, Align(8), false); 8718 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8719 8720 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src); 8721 8722 // STD the extended value into the stack slot. 
    SDValue Store = DAG.getStore(
        Chain, dl, Ext64, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
    Chain = Store;

    // Load the value as a double.
    Ld = DAG.getLoad(
        MVT::f64, dl, Chain, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
    Chain = Ld.getValue(1);
  }

  // FCFID it and return it.
  SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
  if (IsStrict)
    Chain = FP.getValue(1);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
    if (IsStrict)
      FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
                       DAG.getVTList(MVT::f32, MVT::Other),
                       {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
    else
      FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                       DAG.getIntPtrConstant(0, dl));
  }
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Save the FP control word to a register.
  SDValue Chain = Op.getOperand(0);
  SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
  Chain = MFFS.getValue(1);

  SDValue CWD;
  if (isTypeLegal(MVT::i64)) {
    CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
                      DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
  } else {
    // Save the FP register to a stack slot.
    int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
    SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
    Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());

    // Load the FP control word from the low 32 bits of the stack slot.
    assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
           "Stack slot adjustment is valid only on big endian subtargets!");
    SDValue Four = DAG.getConstant(4, dl, PtrVT);
    SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
    CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
    Chain = CWD.getValue(1);
  }

  // Transform as necessary.
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  RetVal =
      DAG.getNode((VT.getSizeInBits() < 16 ?
ISD::TRUNCATE : ISD::ZERO_EXTEND), 8818 dl, VT, RetVal); 8819 8820 return DAG.getMergeValues({RetVal, Chain}, dl); 8821 } 8822 8823 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8824 EVT VT = Op.getValueType(); 8825 unsigned BitWidth = VT.getSizeInBits(); 8826 SDLoc dl(Op); 8827 assert(Op.getNumOperands() == 3 && 8828 VT == Op.getOperand(1).getValueType() && 8829 "Unexpected SHL!"); 8830 8831 // Expand into a bunch of logical ops. Note that these ops 8832 // depend on the PPC behavior for oversized shift amounts. 8833 SDValue Lo = Op.getOperand(0); 8834 SDValue Hi = Op.getOperand(1); 8835 SDValue Amt = Op.getOperand(2); 8836 EVT AmtVT = Amt.getValueType(); 8837 8838 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8839 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8840 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 8841 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 8842 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 8843 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8844 DAG.getConstant(-BitWidth, dl, AmtVT)); 8845 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 8846 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8847 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 8848 SDValue OutOps[] = { OutLo, OutHi }; 8849 return DAG.getMergeValues(OutOps, dl); 8850 } 8851 8852 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8853 EVT VT = Op.getValueType(); 8854 SDLoc dl(Op); 8855 unsigned BitWidth = VT.getSizeInBits(); 8856 assert(Op.getNumOperands() == 3 && 8857 VT == Op.getOperand(1).getValueType() && 8858 "Unexpected SRL!"); 8859 8860 // Expand into a bunch of logical ops. Note that these ops 8861 // depend on the PPC behavior for oversized shift amounts. 8862 SDValue Lo = Op.getOperand(0); 8863 SDValue Hi = Op.getOperand(1); 8864 SDValue Amt = Op.getOperand(2); 8865 EVT AmtVT = Amt.getValueType(); 8866 8867 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8868 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8869 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8870 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8871 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8872 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8873 DAG.getConstant(-BitWidth, dl, AmtVT)); 8874 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 8875 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8876 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 8877 SDValue OutOps[] = { OutLo, OutHi }; 8878 return DAG.getMergeValues(OutOps, dl); 8879 } 8880 8881 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 8882 SDLoc dl(Op); 8883 EVT VT = Op.getValueType(); 8884 unsigned BitWidth = VT.getSizeInBits(); 8885 assert(Op.getNumOperands() == 3 && 8886 VT == Op.getOperand(1).getValueType() && 8887 "Unexpected SRA!"); 8888 8889 // Expand into a bunch of logical ops, followed by a select_cc. 
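  // Illustrative sketch of the select below: with Amt < BitWidth the low
  // result is (Lo >> Amt) | (Hi << (BitWidth - Amt)); once Amt exceeds
  // BitWidth it must instead be Hi >> (Amt - BitWidth) (arithmetic), and the
  // SELECT_CC on whether (Amt - BitWidth) <= 0 picks between the two.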
8890 SDValue Lo = Op.getOperand(0); 8891 SDValue Hi = Op.getOperand(1); 8892 SDValue Amt = Op.getOperand(2); 8893 EVT AmtVT = Amt.getValueType(); 8894 8895 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8896 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8897 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8898 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8899 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8900 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8901 DAG.getConstant(-BitWidth, dl, AmtVT)); 8902 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 8903 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 8904 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 8905 Tmp4, Tmp6, ISD::SETLE); 8906 SDValue OutOps[] = { OutLo, OutHi }; 8907 return DAG.getMergeValues(OutOps, dl); 8908 } 8909 8910 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op, 8911 SelectionDAG &DAG) const { 8912 SDLoc dl(Op); 8913 EVT VT = Op.getValueType(); 8914 unsigned BitWidth = VT.getSizeInBits(); 8915 8916 bool IsFSHL = Op.getOpcode() == ISD::FSHL; 8917 SDValue X = Op.getOperand(0); 8918 SDValue Y = Op.getOperand(1); 8919 SDValue Z = Op.getOperand(2); 8920 EVT AmtVT = Z.getValueType(); 8921 8922 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 8923 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 8924 // This is simpler than TargetLowering::expandFunnelShift because we can rely 8925 // on PowerPC shift by BW being well defined. 8926 Z = DAG.getNode(ISD::AND, dl, AmtVT, Z, 8927 DAG.getConstant(BitWidth - 1, dl, AmtVT)); 8928 SDValue SubZ = 8929 DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z); 8930 X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ); 8931 Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z); 8932 return DAG.getNode(ISD::OR, dl, VT, X, Y); 8933 } 8934 8935 //===----------------------------------------------------------------------===// 8936 // Vector related lowering. 8937 // 8938 8939 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an 8940 /// element size of SplatSize. Cast the result to VT. 8941 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT, 8942 SelectionDAG &DAG, const SDLoc &dl) { 8943 static const MVT VTys[] = { // canonical VT to use for each size. 8944 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 8945 }; 8946 8947 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 8948 8949 // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize. 8950 if (Val == ((1LLU << (SplatSize * 8)) - 1)) { 8951 SplatSize = 1; 8952 Val = 0xFF; 8953 } 8954 8955 EVT CanonicalVT = VTys[SplatSize-1]; 8956 8957 // Build a canonical splat for this value. 8958 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 8959 } 8960 8961 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 8962 /// specified intrinsic ID. 8963 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 8964 const SDLoc &dl, EVT DestVT = MVT::Other) { 8965 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 8966 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8967 DAG.getConstant(IID, dl, MVT::i32), Op); 8968 } 8969 8970 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 8971 /// specified intrinsic ID. 
8972 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 8973 SelectionDAG &DAG, const SDLoc &dl, 8974 EVT DestVT = MVT::Other) { 8975 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 8976 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8977 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 8978 } 8979 8980 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 8981 /// specified intrinsic ID. 8982 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 8983 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 8984 EVT DestVT = MVT::Other) { 8985 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 8986 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8987 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 8988 } 8989 8990 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 8991 /// amount. The result has the specified value type. 8992 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 8993 SelectionDAG &DAG, const SDLoc &dl) { 8994 // Force LHS/RHS to be the right type. 8995 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 8996 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 8997 8998 int Ops[16]; 8999 for (unsigned i = 0; i != 16; ++i) 9000 Ops[i] = i + Amt; 9001 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 9002 return DAG.getNode(ISD::BITCAST, dl, VT, T); 9003 } 9004 9005 /// Do we have an efficient pattern in a .td file for this node? 9006 /// 9007 /// \param V - pointer to the BuildVectorSDNode being matched 9008 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 9009 /// 9010 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 9011 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 9012 /// the opposite is true (expansion is beneficial) are: 9013 /// - The node builds a vector out of integers that are not 32 or 64-bits 9014 /// - The node builds a vector out of constants 9015 /// - The node is a "load-and-splat" 9016 /// In all other cases, we will choose to keep the BUILD_VECTOR. 9017 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 9018 bool HasDirectMove, 9019 bool HasP8Vector) { 9020 EVT VecVT = V->getValueType(0); 9021 bool RightType = VecVT == MVT::v2f64 || 9022 (HasP8Vector && VecVT == MVT::v4f32) || 9023 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 9024 if (!RightType) 9025 return false; 9026 9027 bool IsSplat = true; 9028 bool IsLoad = false; 9029 SDValue Op0 = V->getOperand(0); 9030 9031 // This function is called in a block that confirms the node is not a constant 9032 // splat. So a constant BUILD_VECTOR here means the vector is built out of 9033 // different constants. 9034 if (V->isConstant()) 9035 return false; 9036 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 9037 if (V->getOperand(i).isUndef()) 9038 return false; 9039 // We want to expand nodes that represent load-and-splat even if the 9040 // loaded value is a floating point truncation or conversion to int. 
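    // (Illustrative example: a BUILD_VECTOR whose operands are all the same
    // fp_round(load) is still treated as a load-and-splat, so we report no
    // efficient BUILD_VECTOR pattern and let it be expanded.)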
    if (V->getOperand(i).getOpcode() == ISD::LOAD ||
        (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
      IsLoad = true;
    // If the operands are different or the input is not a load and has more
    // uses than just this BV node, then it isn't a splat.
    if (V->getOperand(i) != Op0 ||
        (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
      IsSplat = false;
  }
  return !(IsSplat && IsLoad);
}

// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {

  SDLoc dl(Op);
  SDValue Op0 = Op->getOperand(0);

  if ((Op.getValueType() != MVT::f128) ||
      (Op0.getOpcode() != ISD::BUILD_PAIR) ||
      (Op0.getOperand(0).getValueType() != MVT::i64) ||
      (Op0.getOperand(1).getValueType() != MVT::i64))
    return SDValue();

  return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
                     Op0.getOperand(1));
}

static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
  const SDValue *InputLoad = &Op;
  while (InputLoad->getOpcode() == ISD::BITCAST)
    InputLoad = &InputLoad->getOperand(0);
  if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
      InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
    IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
    InputLoad = &InputLoad->getOperand(0);
  }
  if (InputLoad->getOpcode() != ISD::LOAD)
    return nullptr;
  LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
  return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
}

// Convert the argument APFloat to a single-precision APFloat if there is no
// loss of information during the conversion and the resulting number is not
// a denormal number. Return true if successful.
bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
  APFloat APFloatToConvert = ArgAPFloat;
  bool LosesInfo = true;
  APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
                           &LosesInfo);
  bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
  if (Success)
    ArgAPFloat = APFloatToConvert;
  return Success;
}

// Bitcast the argument APInt to a double and convert it to a single-precision
// APFloat, bitcast the APFloat to an APInt and assign it to the original
// argument if there is no loss of information during the conversion from
// double to single-precision APFloat and the resulting number is not a
// denormal number. Return true if successful.
bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
  double DpValue = ArgAPInt.bitsToDouble();
  APFloat APFloatDp(DpValue);
  bool Success = convertToNonDenormSingle(APFloatDp);
  if (Success)
    ArgAPInt = APFloatDp.bitcastToAPInt();
  return Success;
}

// Nondestructive check for convertToNonDenormSingle.
bool llvm::checkConvertToNonDenormSingle(APFloat &ArgAPFloat) {
  // Only convert if it loses info, since XXSPLTIDP should
  // handle the other case.
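  // (Unlike the converting variants above, this check never writes back the
  // converted value; it only reports whether the conversion would be exact
  // and non-denormal.)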
9121 APFloat APFloatToConvert = ArgAPFloat; 9122 bool LosesInfo = true; 9123 APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, 9124 &LosesInfo); 9125 9126 return (!LosesInfo && !APFloatToConvert.isDenormal()); 9127 } 9128 9129 static bool isValidSplatLoad(const PPCSubtarget &Subtarget, const SDValue &Op, 9130 unsigned &Opcode) { 9131 LoadSDNode *InputNode = dyn_cast<LoadSDNode>(Op.getOperand(0)); 9132 if (!InputNode || !Subtarget.hasVSX() || !ISD::isUNINDEXEDLoad(InputNode)) 9133 return false; 9134 9135 EVT Ty = Op->getValueType(0); 9136 // For v2f64, v4f32 and v4i32 types, we require the load to be non-extending 9137 // as we cannot handle extending loads for these types. 9138 if ((Ty == MVT::v2f64 || Ty == MVT::v4f32 || Ty == MVT::v4i32) && 9139 ISD::isNON_EXTLoad(InputNode)) 9140 return true; 9141 9142 EVT MemVT = InputNode->getMemoryVT(); 9143 // For v8i16 and v16i8 types, extending loads can be handled as long as the 9144 // memory VT is the same vector element VT type. 9145 // The loads feeding into the v8i16 and v16i8 types will be extending because 9146 // scalar i8/i16 are not legal types. 9147 if ((Ty == MVT::v8i16 || Ty == MVT::v16i8) && ISD::isEXTLoad(InputNode) && 9148 (MemVT == Ty.getVectorElementType())) 9149 return true; 9150 9151 if (Ty == MVT::v2i64) { 9152 // Check the extend type, when the input type is i32, and the output vector 9153 // type is v2i64. 9154 if (MemVT == MVT::i32) { 9155 if (ISD::isZEXTLoad(InputNode)) 9156 Opcode = PPCISD::ZEXT_LD_SPLAT; 9157 if (ISD::isSEXTLoad(InputNode)) 9158 Opcode = PPCISD::SEXT_LD_SPLAT; 9159 } 9160 return true; 9161 } 9162 return false; 9163 } 9164 9165 // If this is a case we can't handle, return null and let the default 9166 // expansion code take care of it. If we CAN select this case, and if it 9167 // selects to a single instruction, return Op. Otherwise, if we can codegen 9168 // this case more efficiently than a constant pool load, lower it to the 9169 // sequence of ops that should be used. 9170 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 9171 SelectionDAG &DAG) const { 9172 SDLoc dl(Op); 9173 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 9174 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 9175 9176 // Check if this is a splat of a constant value. 9177 APInt APSplatBits, APSplatUndef; 9178 unsigned SplatBitSize; 9179 bool HasAnyUndefs; 9180 bool BVNIsConstantSplat = 9181 BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 9182 HasAnyUndefs, 0, !Subtarget.isLittleEndian()); 9183 9184 // If it is a splat of a double, check if we can shrink it to a 32 bit 9185 // non-denormal float which when converted back to double gives us the same 9186 // double. This is to exploit the XXSPLTIDP instruction. 9187 // If we lose precision, we use XXSPLTI32DX. 9188 if (BVNIsConstantSplat && (SplatBitSize == 64) && 9189 Subtarget.hasPrefixInstrs()) { 9190 // Check the type first to short-circuit so we don't modify APSplatBits if 9191 // this block isn't executed. 9192 if ((Op->getValueType(0) == MVT::v2f64) && 9193 convertToNonDenormSingle(APSplatBits)) { 9194 SDValue SplatNode = DAG.getNode( 9195 PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64, 9196 DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32)); 9197 return DAG.getBitcast(Op.getValueType(), SplatNode); 9198 } else { 9199 // We may lose precision, so we have to use XXSPLTI32DX. 
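      // For example: a v2f64 splat of 3.141592653589793 (bit pattern
      // 0x400921FB54442D18) cannot be represented exactly in single
      // precision, so it lands here and is emitted as Hi = 0x400921FB at
      // word index 0 and Lo = 0x54442D18 at word index 1.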

      uint32_t Hi =
          (uint32_t)((APSplatBits.getZExtValue() & 0xFFFFFFFF00000000LL) >> 32);
      uint32_t Lo =
          (uint32_t)(APSplatBits.getZExtValue() & 0xFFFFFFFF);
      SDValue SplatNode = DAG.getUNDEF(MVT::v2i64);

      if (!Hi || !Lo)
        // If either half is 0, then we should generate XXLXOR to set it to 0.
        SplatNode = DAG.getTargetConstant(0, dl, MVT::v2i64);

      if (Hi)
        SplatNode = DAG.getNode(
            PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
            DAG.getTargetConstant(0, dl, MVT::i32),
            DAG.getTargetConstant(Hi, dl, MVT::i32));

      if (Lo)
        SplatNode =
            DAG.getNode(PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
                        DAG.getTargetConstant(1, dl, MVT::i32),
                        DAG.getTargetConstant(Lo, dl, MVT::i32));

      return DAG.getBitcast(Op.getValueType(), SplatNode);
    }
  }

  if (!BVNIsConstantSplat || SplatBitSize > 32) {
    unsigned NewOpcode = PPCISD::LD_SPLAT;

    // Handle load-and-splat patterns as we have instructions that will do this
    // in one go.
    if (DAG.isSplatValue(Op, true) &&
        isValidSplatLoad(Subtarget, Op, NewOpcode)) {
      const SDValue *InputLoad = &Op.getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);

      // If the input load is an extending load, it will be an i32 -> i64
      // extending load and isValidSplatLoad() will update NewOpcode.
      unsigned MemorySize = LD->getMemoryVT().getScalarSizeInBits();
      unsigned ElementSize =
          MemorySize * ((NewOpcode == PPCISD::LD_SPLAT) ? 1 : 2);

      assert(((ElementSize == 2 * MemorySize)
                  ? (NewOpcode == PPCISD::ZEXT_LD_SPLAT ||
                     NewOpcode == PPCISD::SEXT_LD_SPLAT)
                  : (NewOpcode == PPCISD::LD_SPLAT)) &&
             "Unmatched element size and opcode!\n");

      // Checking for a single use of this load, we have to check for vector
      // width (128 bits) / ElementSize uses (since each operand of the
      // BUILD_VECTOR is a separate use of the value).
      unsigned NumUsesOfInputLD = 128 / ElementSize;
      for (SDValue BVInOp : Op->ops())
        if (BVInOp.isUndef())
          NumUsesOfInputLD--;

      // Exclude some cases where LD_SPLAT is worse than scalar_to_vector:
      // The cases below should also apply to "lfiwzx/lfiwax + LE target +
      // index 1", "lxvrhx + BE target + index 7" and "lxvrbx + BE target +
      // index 15", but the function isValidSplatLoad() will only return true
      // when the element at index 0 is the load (not undef). So we will not
      // get into trouble for these cases.
      //
      // case 1 - lfiwzx/lfiwax
      // 1.1: load result is i32 and is sign/zero extended to i64;
      // 1.2: build a v2i64 vector type with the above loaded value;
      // 1.3: the vector has only one value at index 0, others are all undef;
      // 1.4: on a BE target, so that lfiwzx/lfiwax does not need any permute.
      if (NumUsesOfInputLD == 1 &&
          (Op->getValueType(0) == MVT::v2i64 && NewOpcode != PPCISD::LD_SPLAT &&
           !Subtarget.isLittleEndian() && Subtarget.hasVSX() &&
           Subtarget.hasLFIWAX()))
        return SDValue();

      // case 2 - lxvr[hb]x
      // 2.1: load result is at most i16;
      // 2.2: build a vector with the above loaded value;
      // 2.3: the vector has only one value at index 0, others are all undef;
      // 2.4: on an LE target, so that lxvr[hb]x does not need any permute.
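      // (As with case 1, returning SDValue() below defers to the default
      // expansion, which can use a plain scalar_to_vector.)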
9280 if (NumUsesOfInputLD == 1 && Subtarget.isLittleEndian() && 9281 Subtarget.isISA3_1() && ElementSize <= 16) 9282 return SDValue(); 9283 9284 assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?"); 9285 if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) && 9286 Subtarget.hasVSX()) { 9287 SDValue Ops[] = { 9288 LD->getChain(), // Chain 9289 LD->getBasePtr(), // Ptr 9290 DAG.getValueType(Op.getValueType()) // VT 9291 }; 9292 SDValue LdSplt = DAG.getMemIntrinsicNode( 9293 NewOpcode, dl, DAG.getVTList(Op.getValueType(), MVT::Other), Ops, 9294 LD->getMemoryVT(), LD->getMemOperand()); 9295 // Replace all uses of the output chain of the original load with the 9296 // output chain of the new load. 9297 DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), 9298 LdSplt.getValue(1)); 9299 return LdSplt; 9300 } 9301 } 9302 9303 // In 64BIT mode BUILD_VECTOR nodes that are not constant splats of up to 9304 // 32-bits can be lowered to VSX instructions under certain conditions. 9305 // Without VSX, there is no pattern more efficient than expanding the node. 9306 if (Subtarget.hasVSX() && Subtarget.isPPC64() && 9307 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(), 9308 Subtarget.hasP8Vector())) 9309 return Op; 9310 return SDValue(); 9311 } 9312 9313 uint64_t SplatBits = APSplatBits.getZExtValue(); 9314 uint64_t SplatUndef = APSplatUndef.getZExtValue(); 9315 unsigned SplatSize = SplatBitSize / 8; 9316 9317 // First, handle single instruction cases. 9318 9319 // All zeros? 9320 if (SplatBits == 0) { 9321 // Canonicalize all zero vectors to be v4i32. 9322 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 9323 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32); 9324 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z); 9325 } 9326 return Op; 9327 } 9328 9329 // We have XXSPLTIW for constant splats four bytes wide. 9330 // Given vector length is a multiple of 4, 2-byte splats can be replaced 9331 // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to 9332 // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be 9333 // turned into a 4-byte splat of 0xABABABAB. 9334 if (Subtarget.hasPrefixInstrs() && SplatSize == 2) 9335 return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2, 9336 Op.getValueType(), DAG, dl); 9337 9338 if (Subtarget.hasPrefixInstrs() && SplatSize == 4) 9339 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG, 9340 dl); 9341 9342 // We have XXSPLTIB for constant splats one byte wide. 9343 if (Subtarget.hasP9Vector() && SplatSize == 1) 9344 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG, 9345 dl); 9346 9347 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 9348 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 9349 (32-SplatBitSize)); 9350 if (SextVal >= -16 && SextVal <= 15) 9351 return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG, 9352 dl); 9353 9354 // Two instruction sequences. 9355 9356 // If this value is in the range [-32,30] and is even, use: 9357 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2) 9358 // If this value is in the range [17,31] and is odd, use: 9359 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16) 9360 // If this value is in the range [-31,-17] and is odd, use: 9361 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16) 9362 // Note the last two are three-instruction sequences. 
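  // Worked examples (illustrative): a splat of 30 can be formed as
  // vspltisw(15) + vspltisw(15) (added with vadduwm), and a splat of 27 as
  // vspltisw(11) - vspltisw(-16) (via vsubuwm), since 27 = 11 - (-16).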
  if (SextVal >= -32 && SextVal <= 31) {
    // To avoid having these optimizations undone by constant folding,
    // we convert to a pseudo that will be expanded later into one of
    // the above forms.
    SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
              (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // Make -1 with vspltisw -1:
    SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
9436 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 9437 Intrinsic::ppc_altivec_vrlw 9438 }; 9439 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 9440 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 9441 } 9442 9443 // t = vsplti c, result = vsldoi t, t, 1 9444 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 9445 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl); 9446 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1; 9447 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 9448 } 9449 // t = vsplti c, result = vsldoi t, t, 2 9450 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 9451 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl); 9452 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 9453 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 9454 } 9455 // t = vsplti c, result = vsldoi t, t, 3 9456 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 9457 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl); 9458 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 9459 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 9460 } 9461 } 9462 9463 return SDValue(); 9464 } 9465 9466 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 9467 /// the specified operations to build the shuffle. 9468 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 9469 SDValue RHS, SelectionDAG &DAG, 9470 const SDLoc &dl) { 9471 unsigned OpNum = (PFEntry >> 26) & 0x0F; 9472 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 9473 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 9474 9475 enum { 9476 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 9477 OP_VMRGHW, 9478 OP_VMRGLW, 9479 OP_VSPLTISW0, 9480 OP_VSPLTISW1, 9481 OP_VSPLTISW2, 9482 OP_VSPLTISW3, 9483 OP_VSLDOI4, 9484 OP_VSLDOI8, 9485 OP_VSLDOI12 9486 }; 9487 9488 if (OpNum == OP_COPY) { 9489 if (LHSID == (1*9+2)*9+3) return LHS; 9490 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 9491 return RHS; 9492 } 9493 9494 SDValue OpLHS, OpRHS; 9495 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 9496 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 9497 9498 int ShufIdxs[16]; 9499 switch (OpNum) { 9500 default: llvm_unreachable("Unknown i32 permute!"); 9501 case OP_VMRGHW: 9502 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 9503 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 9504 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 9505 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 9506 break; 9507 case OP_VMRGLW: 9508 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 9509 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 9510 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 9511 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 9512 break; 9513 case OP_VSPLTISW0: 9514 for (unsigned i = 0; i != 16; ++i) 9515 ShufIdxs[i] = (i&3)+0; 9516 break; 9517 case OP_VSPLTISW1: 9518 for (unsigned i = 0; i != 16; ++i) 9519 ShufIdxs[i] = (i&3)+4; 9520 break; 9521 case OP_VSPLTISW2: 9522 for (unsigned i = 0; i != 16; ++i) 9523 ShufIdxs[i] = (i&3)+8; 9524 break; 9525 case OP_VSPLTISW3: 9526 for (unsigned i = 0; i != 16; ++i) 9527 ShufIdxs[i] = (i&3)+12; 9528 break; 
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0; otherwise just return
/// a default SDValue.
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned BytesInVector = 16;
  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the byte we want at element 7.
  unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
                                   0, 15, 14, 13, 12, 11, 10, 9};
  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
                                1, 2, 3, 4, 5, 6, 7, 8};

  ArrayRef<int> Mask = N->getMask();
  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.
  // Possible permutations inserting an element from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   ...
  //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
  // Inserting from V1 into V2 will be similar, except the mask range will be
  // [16,31].

  bool FoundCandidate = false;
  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < BytesInVector; ++i) {
    unsigned CurrentElement = Mask[i];
    // If the 2nd operand is undefined, we should only look for element 7 in
    // the Mask.
    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
      continue;

    bool OtherElementsInOrder = true;
    // Examine the other elements in the Mask to see if they're in original
    // order.
    for (unsigned j = 0; j < BytesInVector; ++j) {
      if (j == i)
        continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa. Unless the 2nd operand is
      // undefined, in which case we assume we're always picking from the 1st
      // operand.
      int MaskOffset =
          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
        OtherElementsInOrder = false;
        break;
      }
    }
    // If the other elements are in original order, we record the number of
    // shifts we need to get the element we want into element 7. Also record
    // which byte in the vector we should insert into.
9608 if (OtherElementsInOrder) { 9609 // If 2nd operand is undefined, we assume no shifts and no swapping. 9610 if (V2.isUndef()) { 9611 ShiftElts = 0; 9612 Swap = false; 9613 } else { 9614 // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4. 9615 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF] 9616 : BigEndianShifts[CurrentElement & 0xF]; 9617 Swap = CurrentElement < BytesInVector; 9618 } 9619 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i; 9620 FoundCandidate = true; 9621 break; 9622 } 9623 } 9624 9625 if (!FoundCandidate) 9626 return SDValue(); 9627 9628 // Candidate found, construct the proper SDAG sequence with VINSERTB, 9629 // optionally with VECSHL if shift is required. 9630 if (Swap) 9631 std::swap(V1, V2); 9632 if (V2.isUndef()) 9633 V2 = V1; 9634 if (ShiftElts) { 9635 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 9636 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9637 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl, 9638 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9639 } 9640 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2, 9641 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9642 } 9643 9644 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled 9645 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default 9646 /// SDValue. 9647 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N, 9648 SelectionDAG &DAG) const { 9649 const unsigned NumHalfWords = 8; 9650 const unsigned BytesInVector = NumHalfWords * 2; 9651 // Check that the shuffle is on half-words. 9652 if (!isNByteElemShuffleMask(N, 2, 1)) 9653 return SDValue(); 9654 9655 bool IsLE = Subtarget.isLittleEndian(); 9656 SDLoc dl(N); 9657 SDValue V1 = N->getOperand(0); 9658 SDValue V2 = N->getOperand(1); 9659 unsigned ShiftElts = 0, InsertAtByte = 0; 9660 bool Swap = false; 9661 9662 // Shifts required to get the half-word we want at element 3. 9663 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5}; 9664 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4}; 9665 9666 uint32_t Mask = 0; 9667 uint32_t OriginalOrderLow = 0x1234567; 9668 uint32_t OriginalOrderHigh = 0x89ABCDEF; 9669 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a 9670 // 32-bit space, only need 4-bit nibbles per element. 9671 for (unsigned i = 0; i < NumHalfWords; ++i) { 9672 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 9673 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift); 9674 } 9675 9676 // For each mask element, find out if we're just inserting something 9677 // from V2 into V1 or vice versa. Possible permutations inserting an element 9678 // from V2 into V1: 9679 // X, 1, 2, 3, 4, 5, 6, 7 9680 // 0, X, 2, 3, 4, 5, 6, 7 9681 // 0, 1, X, 3, 4, 5, 6, 7 9682 // 0, 1, 2, X, 4, 5, 6, 7 9683 // 0, 1, 2, 3, X, 5, 6, 7 9684 // 0, 1, 2, 3, 4, X, 6, 7 9685 // 0, 1, 2, 3, 4, 5, X, 7 9686 // 0, 1, 2, 3, 4, 5, 6, X 9687 // Inserting from V1 into V2 will be similar, except mask range will be [8,15]. 9688 9689 bool FoundCandidate = false; 9690 // Go through the mask of half-words to find an element that's being moved 9691 // from one vector to the other. 
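  // (For reference: the identity half-word order <0,1,2,3,4,5,6,7> packs to
  // 0x01234567 under the nibble encoding above, matching OriginalOrderLow.)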
9692 for (unsigned i = 0; i < NumHalfWords; ++i) { 9693 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 9694 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF; 9695 uint32_t MaskOtherElts = ~(0xF << MaskShift); 9696 uint32_t TargetOrder = 0x0; 9697 9698 // If both vector operands for the shuffle are the same vector, the mask 9699 // will contain only elements from the first one and the second one will be 9700 // undef. 9701 if (V2.isUndef()) { 9702 ShiftElts = 0; 9703 unsigned VINSERTHSrcElem = IsLE ? 4 : 3; 9704 TargetOrder = OriginalOrderLow; 9705 Swap = false; 9706 // Skip if not the correct element or mask of other elements don't equal 9707 // to our expected order. 9708 if (MaskOneElt == VINSERTHSrcElem && 9709 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 9710 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 9711 FoundCandidate = true; 9712 break; 9713 } 9714 } else { // If both operands are defined. 9715 // Target order is [8,15] if the current mask is between [0,7]. 9716 TargetOrder = 9717 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow; 9718 // Skip if mask of other elements don't equal our expected order. 9719 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) { 9720 // We only need the last 3 bits for the number of shifts. 9721 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7] 9722 : BigEndianShifts[MaskOneElt & 0x7]; 9723 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2; 9724 Swap = MaskOneElt < NumHalfWords; 9725 FoundCandidate = true; 9726 break; 9727 } 9728 } 9729 } 9730 9731 if (!FoundCandidate) 9732 return SDValue(); 9733 9734 // Candidate found, construct the proper SDAG sequence with VINSERTH, 9735 // optionally with VECSHL if shift is required. 9736 if (Swap) 9737 std::swap(V1, V2); 9738 if (V2.isUndef()) 9739 V2 = V1; 9740 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 9741 if (ShiftElts) { 9742 // Double ShiftElts because we're left shifting on v16i8 type. 9743 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 9744 DAG.getConstant(2 * ShiftElts, dl, MVT::i32)); 9745 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl); 9746 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 9747 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9748 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9749 } 9750 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 9751 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2, 9752 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9753 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9754 } 9755 9756 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be 9757 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise 9758 /// return the default SDValue. 9759 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN, 9760 SelectionDAG &DAG) const { 9761 // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles 9762 // to v16i8. Peek through the bitcasts to get the actual operands. 9763 SDValue LHS = peekThroughBitcasts(SVN->getOperand(0)); 9764 SDValue RHS = peekThroughBitcasts(SVN->getOperand(1)); 9765 9766 auto ShuffleMask = SVN->getMask(); 9767 SDValue VecShuffle(SVN, 0); 9768 SDLoc DL(SVN); 9769 9770 // Check that we have a four byte shuffle. 9771 if (!isNByteElemShuffleMask(SVN, 4, 1)) 9772 return SDValue(); 9773 9774 // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx. 
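  // (getCommutedVectorShuffle swaps the shuffle operands and rewrites the
  // mask accordingly, so after this block a constant BUILD_VECTOR, if any,
  // is always on the RHS.)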
  if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
    std::swap(LHS, RHS);
    VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
    ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
  }

  // Ensure that the RHS is a vector of constants.
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
  if (!BVN)
    return SDValue();

  // Check if RHS is a splat of 4-bytes (or smaller).
  APInt APSplatValue, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
      SplatBitSize > 32)
    return SDValue();

  // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
  // The instruction splats a constant C into two words of the source vector
  // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
  // Thus we check that the shuffle mask is the equivalent of
  // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
  // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
  // within each word are consecutive, so we only need to check the first byte.
  SDValue Index;
  bool IsLE = Subtarget.isLittleEndian();
  if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
      (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
       ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
    Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
  else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
           (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
            ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
    Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
  else
    return SDValue();

  // If the splat is narrower than 32 bits, we need to get the 32-bit value
  // for XXSPLTI32DX.
  unsigned SplatVal = APSplatValue.getZExtValue();
  for (; SplatBitSize < 32; SplatBitSize <<= 1)
    SplatVal |= (SplatVal << SplatBitSize);

  SDValue SplatNode = DAG.getNode(
      PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
      Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
  return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
}

/// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
/// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if the shift amount is
/// a multiple of 8. Otherwise convert it to a scalar rotation (i128),
/// i.e. (or (shl x, C1), (srl x, 128-C1)).
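///
/// For example (illustrative): a rotate left by 24 bits becomes a 3-byte
/// rotate expressed as a v16i8 shuffle, while a rotate by 5 becomes
/// (or (shl x, 5), (srl x, 123)).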
9831 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const { 9832 assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL"); 9833 assert(Op.getValueType() == MVT::v1i128 && 9834 "Only set v1i128 as custom, other type shouldn't reach here!"); 9835 SDLoc dl(Op); 9836 SDValue N0 = peekThroughBitcasts(Op.getOperand(0)); 9837 SDValue N1 = peekThroughBitcasts(Op.getOperand(1)); 9838 unsigned SHLAmt = N1.getConstantOperandVal(0); 9839 if (SHLAmt % 8 == 0) { 9840 std::array<int, 16> Mask; 9841 std::iota(Mask.begin(), Mask.end(), 0); 9842 std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end()); 9843 if (SDValue Shuffle = 9844 DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0), 9845 DAG.getUNDEF(MVT::v16i8), Mask)) 9846 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle); 9847 } 9848 SDValue ArgVal = DAG.getBitcast(MVT::i128, N0); 9849 SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal, 9850 DAG.getConstant(SHLAmt, dl, MVT::i32)); 9851 SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal, 9852 DAG.getConstant(128 - SHLAmt, dl, MVT::i32)); 9853 SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp); 9854 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp); 9855 } 9856 9857 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 9858 /// is a shuffle we can handle in a single instruction, return it. Otherwise, 9859 /// return the code it can be lowered into. Worst case, it can always be 9860 /// lowered into a vperm. 9861 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 9862 SelectionDAG &DAG) const { 9863 SDLoc dl(Op); 9864 SDValue V1 = Op.getOperand(0); 9865 SDValue V2 = Op.getOperand(1); 9866 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 9867 9868 // Any nodes that were combined in the target-independent combiner prior 9869 // to vector legalization will not be sent to the target combine. Try to 9870 // combine it here. 9871 if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) { 9872 if (!isa<ShuffleVectorSDNode>(NewShuffle)) 9873 return NewShuffle; 9874 Op = NewShuffle; 9875 SVOp = cast<ShuffleVectorSDNode>(Op); 9876 V1 = Op.getOperand(0); 9877 V2 = Op.getOperand(1); 9878 } 9879 EVT VT = Op.getValueType(); 9880 bool isLittleEndian = Subtarget.isLittleEndian(); 9881 9882 unsigned ShiftElts, InsertAtByte; 9883 bool Swap = false; 9884 9885 // If this is a load-and-splat, we can do that with a single instruction 9886 // in some cases. However if the load has multiple uses, we don't want to 9887 // combine it because that will just produce multiple loads. 9888 bool IsPermutedLoad = false; 9889 const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad); 9890 if (InputLoad && Subtarget.hasVSX() && V2.isUndef() && 9891 (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) && 9892 InputLoad->hasOneUse()) { 9893 bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4); 9894 int SplatIdx = 9895 PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG); 9896 9897 // The splat index for permuted loads will be in the left half of the vector 9898 // which is strictly wider than the loaded value by 8 bytes. So we need to 9899 // adjust the splat index to point to the correct address in memory. 9900 if (IsPermutedLoad) { 9901 assert((isLittleEndian || IsFourByte) && 9902 "Unexpected size for permuted load on big endian target"); 9903 SplatIdx += IsFourByte ? 2 : 1; 9904 assert((SplatIdx < (IsFourByte ? 
4 : 2)) && 9905 "Splat of a value outside of the loaded memory"); 9906 } 9907 9908 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad); 9909 // For 4-byte load-and-splat, we need Power9. 9910 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) { 9911 uint64_t Offset = 0; 9912 if (IsFourByte) 9913 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4; 9914 else 9915 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8; 9916 9917 // If the width of the load is the same as the width of the splat, 9918 // loading with an offset would load the wrong memory. 9919 if (LD->getValueType(0).getSizeInBits() == (IsFourByte ? 32 : 64)) 9920 Offset = 0; 9921 9922 SDValue BasePtr = LD->getBasePtr(); 9923 if (Offset != 0) 9924 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), 9925 BasePtr, DAG.getIntPtrConstant(Offset, dl)); 9926 SDValue Ops[] = { 9927 LD->getChain(), // Chain 9928 BasePtr, // BasePtr 9929 DAG.getValueType(Op.getValueType()) // VT 9930 }; 9931 SDVTList VTL = 9932 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other); 9933 SDValue LdSplt = 9934 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL, 9935 Ops, LD->getMemoryVT(), LD->getMemOperand()); 9936 DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1)); 9937 if (LdSplt.getValueType() != SVOp->getValueType(0)) 9938 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt); 9939 return LdSplt; 9940 } 9941 } 9942 9943 // All v2i64 and v2f64 shuffles are legal 9944 if (VT == MVT::v2i64 || VT == MVT::v2f64) 9945 return Op; 9946 9947 if (Subtarget.hasP9Vector() && 9948 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 9949 isLittleEndian)) { 9950 if (Swap) 9951 std::swap(V1, V2); 9952 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9953 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 9954 if (ShiftElts) { 9955 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 9956 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9957 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl, 9958 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9959 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9960 } 9961 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2, 9962 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9963 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9964 } 9965 9966 if (Subtarget.hasPrefixInstrs()) { 9967 SDValue SplatInsertNode; 9968 if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG))) 9969 return SplatInsertNode; 9970 } 9971 9972 if (Subtarget.hasP9Altivec()) { 9973 SDValue NewISDNode; 9974 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG))) 9975 return NewISDNode; 9976 9977 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG))) 9978 return NewISDNode; 9979 } 9980 9981 if (Subtarget.hasVSX() && 9982 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 9983 if (Swap) 9984 std::swap(V1, V2); 9985 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9986 SDValue Conv2 = 9987 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? 
V1 : V2); 9988 9989 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 9990 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9991 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 9992 } 9993 9994 if (Subtarget.hasVSX() && 9995 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 9996 if (Swap) 9997 std::swap(V1, V2); 9998 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 9999 SDValue Conv2 = 10000 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2); 10001 10002 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2, 10003 DAG.getConstant(ShiftElts, dl, MVT::i32)); 10004 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI); 10005 } 10006 10007 if (Subtarget.hasP9Vector()) { 10008 if (PPC::isXXBRHShuffleMask(SVOp)) { 10009 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 10010 SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv); 10011 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord); 10012 } else if (PPC::isXXBRWShuffleMask(SVOp)) { 10013 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 10014 SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv); 10015 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord); 10016 } else if (PPC::isXXBRDShuffleMask(SVOp)) { 10017 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 10018 SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv); 10019 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord); 10020 } else if (PPC::isXXBRQShuffleMask(SVOp)) { 10021 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1); 10022 SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv); 10023 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord); 10024 } 10025 } 10026 10027 if (Subtarget.hasVSX()) { 10028 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 10029 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG); 10030 10031 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 10032 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 10033 DAG.getConstant(SplatIdx, dl, MVT::i32)); 10034 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 10035 } 10036 10037 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 10038 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 10039 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 10040 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 10041 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 10042 } 10043 } 10044 10045 // Cases that are handled by instructions that take permute immediates 10046 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 10047 // selected by the instruction selector. 
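  // For example, a splat of element 3 of a v4i32 can be selected directly to
  // "vspltw 3", with the element number carried as an instruction immediate
  // rather than as a permute control vector loaded from the constant pool.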
10048 if (V2.isUndef()) { 10049 if (PPC::isSplatShuffleMask(SVOp, 1) || 10050 PPC::isSplatShuffleMask(SVOp, 2) || 10051 PPC::isSplatShuffleMask(SVOp, 4) || 10052 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 10053 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 10054 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 10055 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 10056 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 10057 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 10058 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 10059 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 10060 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 10061 (Subtarget.hasP8Altivec() && ( 10062 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 10063 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 10064 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 10065 return Op; 10066 } 10067 } 10068 10069 // Altivec has a variety of "shuffle immediates" that take two vector inputs 10070 // and produce a fixed permutation. If any of these match, do not lower to 10071 // VPERM. 10072 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 10073 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 10074 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 10075 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 10076 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 10077 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 10078 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 10079 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 10080 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 10081 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 10082 (Subtarget.hasP8Altivec() && ( 10083 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 10084 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 10085 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 10086 return Op; 10087 10088 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 10089 // perfect shuffle table to emit an optimal matching sequence. 10090 ArrayRef<int> PermMask = SVOp->getMask(); 10091 10092 if (!DisablePerfectShuffle && !isLittleEndian) { 10093 unsigned PFIndexes[4]; 10094 bool isFourElementShuffle = true; 10095 for (unsigned i = 0; i != 4 && isFourElementShuffle; 10096 ++i) { // Element number 10097 unsigned EltNo = 8; // Start out undef. 10098 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 10099 if (PermMask[i * 4 + j] < 0) 10100 continue; // Undef, ignore it. 10101 10102 unsigned ByteSource = PermMask[i * 4 + j]; 10103 if ((ByteSource & 3) != j) { 10104 isFourElementShuffle = false; 10105 break; 10106 } 10107 10108 if (EltNo == 8) { 10109 EltNo = ByteSource / 4; 10110 } else if (EltNo != ByteSource / 4) { 10111 isFourElementShuffle = false; 10112 break; 10113 } 10114 } 10115 PFIndexes[i] = EltNo; 10116 } 10117 10118 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 10119 // perfect shuffle vector to determine if it is cost effective to do this as 10120 // discrete instructions, or whether we should use a vperm. 10121 // For now, we skip this for little endian until such time as we have a 10122 // little-endian perfect shuffle table. 10123 if (isFourElementShuffle) { 10124 // Compute the index in the perfect shuffle table. 
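      // Each of the four 4-byte result elements is either sourced from one of
      // the eight input elements (0-7) or is undef (encoded as 8), so the four
      // indices combine as the digits of a base-9 number.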
10125 unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 + 10126 PFIndexes[2] * 9 + PFIndexes[3]; 10127 10128 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 10129 unsigned Cost = (PFEntry >> 30); 10130 10131 // Determining when to avoid vperm is tricky. Many things affect the cost 10132 // of vperm, particularly how many times the perm mask needs to be 10133 // computed. For example, if the perm mask can be hoisted out of a loop or 10134 // is already used (perhaps because there are multiple permutes with the 10135 // same shuffle mask?) the vperm has a cost of 1. OTOH, hoisting the 10136 // permute mask out of the loop requires an extra register. 10137 // 10138 // As a compromise, we only emit discrete instructions if the shuffle can 10139 // be generated in 3 or fewer operations. When we have loop information 10140 // available, if this block is within a loop, we should avoid using vperm 10141 // for 3-operation perms and use a constant pool load instead. 10142 if (Cost < 3) 10143 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 10144 } 10145 } 10146 10147 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 10148 // vector that will get spilled to the constant pool. 10149 if (V2.isUndef()) V2 = V1; 10150 10151 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 10152 // that it is in input element units, not in bytes. Convert now. 10153 10154 // For little endian, the order of the input vectors is reversed, and 10155 // the permutation mask is complemented with respect to 31. This is 10156 // necessary to produce proper semantics with the big-endian-biased vperm 10157 // instruction. 10158 EVT EltVT = V1.getValueType().getVectorElementType(); 10159 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 10160 10161 SmallVector<SDValue, 16> ResultMask; 10162 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 10163 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 10164 10165 for (unsigned j = 0; j != BytesPerElement; ++j) 10166 if (isLittleEndian) 10167 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j), 10168 dl, MVT::i32)); 10169 else 10170 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl, 10171 MVT::i32)); 10172 } 10173 10174 ShufflesHandledWithVPERM++; 10175 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask); 10176 LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n"); 10177 LLVM_DEBUG(SVOp->dump()); 10178 LLVM_DEBUG(dbgs() << "With the following permute control vector:\n"); 10179 LLVM_DEBUG(VPermMask.dump()); 10180 10181 if (isLittleEndian) 10182 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 10183 V2, V1, VPermMask); 10184 else 10185 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), 10186 V1, V2, VPermMask); 10187 } 10188 10189 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a 10190 /// vector comparison. If it is, return true and fill in Opc/isDot with 10191 /// information about the intrinsic. 10192 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, 10193 bool &isDot, const PPCSubtarget &Subtarget) { 10194 unsigned IntrinsicID = 10195 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 10196 CompareOpc = -1; 10197 isDot = false; 10198 switch (IntrinsicID) { 10199 default: 10200 return false; 10201 // Comparison predicates. 
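  // The "_p" suffix denotes the predicate (record) forms of the compares.
  // These set isDot so that the lowering below emits the record-form compare
  // (VCMP_rec) and extracts the requested bit of CR6.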
10202 case Intrinsic::ppc_altivec_vcmpbfp_p: 10203 CompareOpc = 966; 10204 isDot = true; 10205 break; 10206 case Intrinsic::ppc_altivec_vcmpeqfp_p: 10207 CompareOpc = 198; 10208 isDot = true; 10209 break; 10210 case Intrinsic::ppc_altivec_vcmpequb_p: 10211 CompareOpc = 6; 10212 isDot = true; 10213 break; 10214 case Intrinsic::ppc_altivec_vcmpequh_p: 10215 CompareOpc = 70; 10216 isDot = true; 10217 break; 10218 case Intrinsic::ppc_altivec_vcmpequw_p: 10219 CompareOpc = 134; 10220 isDot = true; 10221 break; 10222 case Intrinsic::ppc_altivec_vcmpequd_p: 10223 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) { 10224 CompareOpc = 199; 10225 isDot = true; 10226 } else 10227 return false; 10228 break; 10229 case Intrinsic::ppc_altivec_vcmpneb_p: 10230 case Intrinsic::ppc_altivec_vcmpneh_p: 10231 case Intrinsic::ppc_altivec_vcmpnew_p: 10232 case Intrinsic::ppc_altivec_vcmpnezb_p: 10233 case Intrinsic::ppc_altivec_vcmpnezh_p: 10234 case Intrinsic::ppc_altivec_vcmpnezw_p: 10235 if (Subtarget.hasP9Altivec()) { 10236 switch (IntrinsicID) { 10237 default: 10238 llvm_unreachable("Unknown comparison intrinsic."); 10239 case Intrinsic::ppc_altivec_vcmpneb_p: 10240 CompareOpc = 7; 10241 break; 10242 case Intrinsic::ppc_altivec_vcmpneh_p: 10243 CompareOpc = 71; 10244 break; 10245 case Intrinsic::ppc_altivec_vcmpnew_p: 10246 CompareOpc = 135; 10247 break; 10248 case Intrinsic::ppc_altivec_vcmpnezb_p: 10249 CompareOpc = 263; 10250 break; 10251 case Intrinsic::ppc_altivec_vcmpnezh_p: 10252 CompareOpc = 327; 10253 break; 10254 case Intrinsic::ppc_altivec_vcmpnezw_p: 10255 CompareOpc = 391; 10256 break; 10257 } 10258 isDot = true; 10259 } else 10260 return false; 10261 break; 10262 case Intrinsic::ppc_altivec_vcmpgefp_p: 10263 CompareOpc = 454; 10264 isDot = true; 10265 break; 10266 case Intrinsic::ppc_altivec_vcmpgtfp_p: 10267 CompareOpc = 710; 10268 isDot = true; 10269 break; 10270 case Intrinsic::ppc_altivec_vcmpgtsb_p: 10271 CompareOpc = 774; 10272 isDot = true; 10273 break; 10274 case Intrinsic::ppc_altivec_vcmpgtsh_p: 10275 CompareOpc = 838; 10276 isDot = true; 10277 break; 10278 case Intrinsic::ppc_altivec_vcmpgtsw_p: 10279 CompareOpc = 902; 10280 isDot = true; 10281 break; 10282 case Intrinsic::ppc_altivec_vcmpgtsd_p: 10283 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) { 10284 CompareOpc = 967; 10285 isDot = true; 10286 } else 10287 return false; 10288 break; 10289 case Intrinsic::ppc_altivec_vcmpgtub_p: 10290 CompareOpc = 518; 10291 isDot = true; 10292 break; 10293 case Intrinsic::ppc_altivec_vcmpgtuh_p: 10294 CompareOpc = 582; 10295 isDot = true; 10296 break; 10297 case Intrinsic::ppc_altivec_vcmpgtuw_p: 10298 CompareOpc = 646; 10299 isDot = true; 10300 break; 10301 case Intrinsic::ppc_altivec_vcmpgtud_p: 10302 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) { 10303 CompareOpc = 711; 10304 isDot = true; 10305 } else 10306 return false; 10307 break; 10308 10309 case Intrinsic::ppc_altivec_vcmpequq: 10310 case Intrinsic::ppc_altivec_vcmpgtsq: 10311 case Intrinsic::ppc_altivec_vcmpgtuq: 10312 if (!Subtarget.isISA3_1()) 10313 return false; 10314 switch (IntrinsicID) { 10315 default: 10316 llvm_unreachable("Unknown comparison intrinsic."); 10317 case Intrinsic::ppc_altivec_vcmpequq: 10318 CompareOpc = 455; 10319 break; 10320 case Intrinsic::ppc_altivec_vcmpgtsq: 10321 CompareOpc = 903; 10322 break; 10323 case Intrinsic::ppc_altivec_vcmpgtuq: 10324 CompareOpc = 647; 10325 break; 10326 } 10327 break; 10328 10329 // VSX predicate comparisons use the same infrastructure 10330 case 
Intrinsic::ppc_vsx_xvcmpeqdp_p: 10331 case Intrinsic::ppc_vsx_xvcmpgedp_p: 10332 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 10333 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 10334 case Intrinsic::ppc_vsx_xvcmpgesp_p: 10335 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 10336 if (Subtarget.hasVSX()) { 10337 switch (IntrinsicID) { 10338 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 10339 CompareOpc = 99; 10340 break; 10341 case Intrinsic::ppc_vsx_xvcmpgedp_p: 10342 CompareOpc = 115; 10343 break; 10344 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 10345 CompareOpc = 107; 10346 break; 10347 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 10348 CompareOpc = 67; 10349 break; 10350 case Intrinsic::ppc_vsx_xvcmpgesp_p: 10351 CompareOpc = 83; 10352 break; 10353 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 10354 CompareOpc = 75; 10355 break; 10356 } 10357 isDot = true; 10358 } else 10359 return false; 10360 break; 10361 10362 // Normal Comparisons. 10363 case Intrinsic::ppc_altivec_vcmpbfp: 10364 CompareOpc = 966; 10365 break; 10366 case Intrinsic::ppc_altivec_vcmpeqfp: 10367 CompareOpc = 198; 10368 break; 10369 case Intrinsic::ppc_altivec_vcmpequb: 10370 CompareOpc = 6; 10371 break; 10372 case Intrinsic::ppc_altivec_vcmpequh: 10373 CompareOpc = 70; 10374 break; 10375 case Intrinsic::ppc_altivec_vcmpequw: 10376 CompareOpc = 134; 10377 break; 10378 case Intrinsic::ppc_altivec_vcmpequd: 10379 if (Subtarget.hasP8Altivec()) 10380 CompareOpc = 199; 10381 else 10382 return false; 10383 break; 10384 case Intrinsic::ppc_altivec_vcmpneb: 10385 case Intrinsic::ppc_altivec_vcmpneh: 10386 case Intrinsic::ppc_altivec_vcmpnew: 10387 case Intrinsic::ppc_altivec_vcmpnezb: 10388 case Intrinsic::ppc_altivec_vcmpnezh: 10389 case Intrinsic::ppc_altivec_vcmpnezw: 10390 if (Subtarget.hasP9Altivec()) 10391 switch (IntrinsicID) { 10392 default: 10393 llvm_unreachable("Unknown comparison intrinsic."); 10394 case Intrinsic::ppc_altivec_vcmpneb: 10395 CompareOpc = 7; 10396 break; 10397 case Intrinsic::ppc_altivec_vcmpneh: 10398 CompareOpc = 71; 10399 break; 10400 case Intrinsic::ppc_altivec_vcmpnew: 10401 CompareOpc = 135; 10402 break; 10403 case Intrinsic::ppc_altivec_vcmpnezb: 10404 CompareOpc = 263; 10405 break; 10406 case Intrinsic::ppc_altivec_vcmpnezh: 10407 CompareOpc = 327; 10408 break; 10409 case Intrinsic::ppc_altivec_vcmpnezw: 10410 CompareOpc = 391; 10411 break; 10412 } 10413 else 10414 return false; 10415 break; 10416 case Intrinsic::ppc_altivec_vcmpgefp: 10417 CompareOpc = 454; 10418 break; 10419 case Intrinsic::ppc_altivec_vcmpgtfp: 10420 CompareOpc = 710; 10421 break; 10422 case Intrinsic::ppc_altivec_vcmpgtsb: 10423 CompareOpc = 774; 10424 break; 10425 case Intrinsic::ppc_altivec_vcmpgtsh: 10426 CompareOpc = 838; 10427 break; 10428 case Intrinsic::ppc_altivec_vcmpgtsw: 10429 CompareOpc = 902; 10430 break; 10431 case Intrinsic::ppc_altivec_vcmpgtsd: 10432 if (Subtarget.hasP8Altivec()) 10433 CompareOpc = 967; 10434 else 10435 return false; 10436 break; 10437 case Intrinsic::ppc_altivec_vcmpgtub: 10438 CompareOpc = 518; 10439 break; 10440 case Intrinsic::ppc_altivec_vcmpgtuh: 10441 CompareOpc = 582; 10442 break; 10443 case Intrinsic::ppc_altivec_vcmpgtuw: 10444 CompareOpc = 646; 10445 break; 10446 case Intrinsic::ppc_altivec_vcmpgtud: 10447 if (Subtarget.hasP8Altivec()) 10448 CompareOpc = 711; 10449 else 10450 return false; 10451 break; 10452 case Intrinsic::ppc_altivec_vcmpequq_p: 10453 case Intrinsic::ppc_altivec_vcmpgtsq_p: 10454 case Intrinsic::ppc_altivec_vcmpgtuq_p: 10455 if (!Subtarget.isISA3_1()) 10456 return false; 10457 switch (IntrinsicID) { 10458 default: 
10459 llvm_unreachable("Unknown comparison intrinsic."); 10460 case Intrinsic::ppc_altivec_vcmpequq_p: 10461 CompareOpc = 455; 10462 break; 10463 case Intrinsic::ppc_altivec_vcmpgtsq_p: 10464 CompareOpc = 903; 10465 break; 10466 case Intrinsic::ppc_altivec_vcmpgtuq_p: 10467 CompareOpc = 647; 10468 break; 10469 } 10470 isDot = true; 10471 break; 10472 } 10473 return true; 10474 } 10475 10476 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 10477 /// lower, do it, otherwise return null. 10478 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 10479 SelectionDAG &DAG) const { 10480 unsigned IntrinsicID = 10481 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10482 10483 SDLoc dl(Op); 10484 10485 switch (IntrinsicID) { 10486 case Intrinsic::thread_pointer: 10487 // Reads the thread pointer register, used for __builtin_thread_pointer. 10488 if (Subtarget.isPPC64()) 10489 return DAG.getRegister(PPC::X13, MVT::i64); 10490 return DAG.getRegister(PPC::R2, MVT::i32); 10491 10492 case Intrinsic::ppc_mma_disassemble_acc: 10493 case Intrinsic::ppc_vsx_disassemble_pair: { 10494 int NumVecs = 2; 10495 SDValue WideVec = Op.getOperand(1); 10496 if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) { 10497 NumVecs = 4; 10498 WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec); 10499 } 10500 SmallVector<SDValue, 4> RetOps; 10501 for (int VecNo = 0; VecNo < NumVecs; VecNo++) { 10502 SDValue Extract = DAG.getNode( 10503 PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec, 10504 DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo 10505 : VecNo, 10506 dl, getPointerTy(DAG.getDataLayout()))); 10507 RetOps.push_back(Extract); 10508 } 10509 return DAG.getMergeValues(RetOps, dl); 10510 } 10511 10512 case Intrinsic::ppc_unpack_longdouble: { 10513 auto *Idx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 10514 assert(Idx && (Idx->getSExtValue() == 0 || Idx->getSExtValue() == 1) && 10515 "Argument of long double unpack must be 0 or 1!"); 10516 return DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Op.getOperand(1), 10517 DAG.getConstant(!!(Idx->getSExtValue()), dl, 10518 Idx->getValueType(0))); 10519 } 10520 10521 case Intrinsic::ppc_compare_exp_lt: 10522 case Intrinsic::ppc_compare_exp_gt: 10523 case Intrinsic::ppc_compare_exp_eq: 10524 case Intrinsic::ppc_compare_exp_uo: { 10525 unsigned Pred; 10526 switch (IntrinsicID) { 10527 case Intrinsic::ppc_compare_exp_lt: 10528 Pred = PPC::PRED_LT; 10529 break; 10530 case Intrinsic::ppc_compare_exp_gt: 10531 Pred = PPC::PRED_GT; 10532 break; 10533 case Intrinsic::ppc_compare_exp_eq: 10534 Pred = PPC::PRED_EQ; 10535 break; 10536 case Intrinsic::ppc_compare_exp_uo: 10537 Pred = PPC::PRED_UN; 10538 break; 10539 } 10540 return SDValue( 10541 DAG.getMachineNode( 10542 PPC::SELECT_CC_I4, dl, MVT::i32, 10543 {SDValue(DAG.getMachineNode(PPC::XSCMPEXPDP, dl, MVT::i32, 10544 Op.getOperand(1), Op.getOperand(2)), 10545 0), 10546 DAG.getConstant(1, dl, MVT::i32), DAG.getConstant(0, dl, MVT::i32), 10547 DAG.getTargetConstant(Pred, dl, MVT::i32)}), 10548 0); 10549 } 10550 case Intrinsic::ppc_test_data_class_d: 10551 case Intrinsic::ppc_test_data_class_f: { 10552 unsigned CmprOpc = PPC::XSTSTDCDP; 10553 if (IntrinsicID == Intrinsic::ppc_test_data_class_f) 10554 CmprOpc = PPC::XSTSTDCSP; 10555 return SDValue( 10556 DAG.getMachineNode( 10557 PPC::SELECT_CC_I4, dl, MVT::i32, 10558 {SDValue(DAG.getMachineNode(CmprOpc, dl, MVT::i32, Op.getOperand(2), 10559 Op.getOperand(1)), 10560 0), 10561 DAG.getConstant(1, dl, MVT::i32), 
DAG.getConstant(0, dl, MVT::i32), 10562 DAG.getTargetConstant(PPC::PRED_EQ, dl, MVT::i32)}), 10563 0); 10564 } 10565 case Intrinsic::ppc_fnmsub: { 10566 EVT VT = Op.getOperand(1).getValueType(); 10567 if (!Subtarget.hasVSX() || (!Subtarget.hasFloat128() && VT == MVT::f128)) 10568 return DAG.getNode( 10569 ISD::FNEG, dl, VT, 10570 DAG.getNode(ISD::FMA, dl, VT, Op.getOperand(1), Op.getOperand(2), 10571 DAG.getNode(ISD::FNEG, dl, VT, Op.getOperand(3)))); 10572 return DAG.getNode(PPCISD::FNMSUB, dl, VT, Op.getOperand(1), 10573 Op.getOperand(2), Op.getOperand(3)); 10574 } 10575 case Intrinsic::ppc_convert_f128_to_ppcf128: 10576 case Intrinsic::ppc_convert_ppcf128_to_f128: { 10577 RTLIB::Libcall LC = IntrinsicID == Intrinsic::ppc_convert_ppcf128_to_f128 10578 ? RTLIB::CONVERT_PPCF128_F128 10579 : RTLIB::CONVERT_F128_PPCF128; 10580 MakeLibCallOptions CallOptions; 10581 std::pair<SDValue, SDValue> Result = 10582 makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(1), CallOptions, 10583 dl, SDValue()); 10584 return Result.first; 10585 } 10586 case Intrinsic::ppc_maxfe: 10587 case Intrinsic::ppc_maxfl: 10588 case Intrinsic::ppc_maxfs: 10589 case Intrinsic::ppc_minfe: 10590 case Intrinsic::ppc_minfl: 10591 case Intrinsic::ppc_minfs: { 10592 EVT VT = Op.getValueType(); 10593 assert( 10594 all_of(Op->ops().drop_front(4), 10595 [VT](const SDUse &Use) { return Use.getValueType() == VT; }) && 10596 "ppc_[max|min]f[e|l|s] must have uniform type arguments"); 10597 (void)VT; 10598 ISD::CondCode CC = ISD::SETGT; 10599 if (IntrinsicID == Intrinsic::ppc_minfe || 10600 IntrinsicID == Intrinsic::ppc_minfl || 10601 IntrinsicID == Intrinsic::ppc_minfs) 10602 CC = ISD::SETLT; 10603 unsigned I = Op.getNumOperands() - 2, Cnt = I; 10604 SDValue Res = Op.getOperand(I); 10605 for (--I; Cnt != 0; --Cnt, I = (--I == 0 ? (Op.getNumOperands() - 1) : I)) { 10606 Res = 10607 DAG.getSelectCC(dl, Res, Op.getOperand(I), Res, Op.getOperand(I), CC); 10608 } 10609 return Res; 10610 } 10611 } 10612 10613 // If this is a lowered altivec predicate compare, CompareOpc is set to the 10614 // opcode number of the comparison. 10615 int CompareOpc; 10616 bool isDot; 10617 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 10618 return SDValue(); // Don't custom lower most intrinsics. 10619 10620 // If this is a non-dot comparison, make the VCMP node and we are done. 10621 if (!isDot) { 10622 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 10623 Op.getOperand(1), Op.getOperand(2), 10624 DAG.getConstant(CompareOpc, dl, MVT::i32)); 10625 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 10626 } 10627 10628 // Create the PPCISD altivec 'dot' comparison node. 10629 SDValue Ops[] = { 10630 Op.getOperand(2), // LHS 10631 Op.getOperand(3), // RHS 10632 DAG.getConstant(CompareOpc, dl, MVT::i32) 10633 }; 10634 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 10635 SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops); 10636 10637 // Now that we have the comparison, emit a copy from the CR to a GPR. 10638 // This is flagged to the above dot comparison. 10639 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 10640 DAG.getRegister(PPC::CR6, MVT::i32), 10641 CompNode.getValue(1)); 10642 10643 // Unpack the result based on how the target uses it. 10644 unsigned BitNo; // Bit # of CR6. 10645 bool InvertBit; // Invert result? 10646 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 10647 default: // Can't happen, don't crash on invalid number though. 
10648 case 0: // Return the value of the EQ bit of CR6. 10649 BitNo = 0; InvertBit = false; 10650 break; 10651 case 1: // Return the inverted value of the EQ bit of CR6. 10652 BitNo = 0; InvertBit = true; 10653 break; 10654 case 2: // Return the value of the LT bit of CR6. 10655 BitNo = 2; InvertBit = false; 10656 break; 10657 case 3: // Return the inverted value of the LT bit of CR6. 10658 BitNo = 2; InvertBit = true; 10659 break; 10660 } 10661 10662 // Shift the bit into the low position. 10663 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 10664 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 10665 // Isolate the bit. 10666 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 10667 DAG.getConstant(1, dl, MVT::i32)); 10668 10669 // If we are supposed to, toggle the bit. 10670 if (InvertBit) 10671 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 10672 DAG.getConstant(1, dl, MVT::i32)); 10673 return Flags; 10674 } 10675 10676 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 10677 SelectionDAG &DAG) const { 10678 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to 10679 // the beginning of the argument list. 10680 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1; 10681 SDLoc DL(Op); 10682 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) { 10683 case Intrinsic::ppc_cfence: { 10684 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument."); 10685 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now."); 10686 SDValue Val = Op.getOperand(ArgStart + 1); 10687 EVT Ty = Val.getValueType(); 10688 if (Ty == MVT::i128) { 10689 // FIXME: Testing one of two paired registers is sufficient to guarantee 10690 // ordering? 10691 Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, Val); 10692 } 10693 return SDValue( 10694 DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other, 10695 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Val), 10696 Op.getOperand(0)), 10697 0); 10698 } 10699 default: 10700 break; 10701 } 10702 return SDValue(); 10703 } 10704 10705 // Lower scalar BSWAP64 to xxbrd. 10706 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const { 10707 SDLoc dl(Op); 10708 if (!Subtarget.isPPC64()) 10709 return Op; 10710 // MTVSRDD 10711 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0), 10712 Op.getOperand(0)); 10713 // XXBRD 10714 Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op); 10715 // MFVSRD 10716 int VectorIndex = 0; 10717 if (Subtarget.isLittleEndian()) 10718 VectorIndex = 1; 10719 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op, 10720 DAG.getTargetConstant(VectorIndex, dl, MVT::i32)); 10721 return Op; 10722 } 10723 10724 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be 10725 // compared to a value that is atomically loaded (atomic loads zero-extend). 10726 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, 10727 SelectionDAG &DAG) const { 10728 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP && 10729 "Expecting an atomic compare-and-swap here."); 10730 SDLoc dl(Op); 10731 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode()); 10732 EVT MemVT = AtomicNode->getMemoryVT(); 10733 if (MemVT.getSizeInBits() >= 32) 10734 return Op; 10735 10736 SDValue CmpOp = Op.getOperand(2); 10737 // If this is already correctly zero-extended, leave it alone. 
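  // For example, for an i8 compare operand the high bits are bits 8-31 of the
  // i32 node; if those are already known to be zero, no masking is needed and
  // the node is returned unchanged.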
10738 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10739 if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10740 return Op;
10741
10742 // Clear the high bits of the compare operand.
10743 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10744 SDValue NewCmpOp =
10745 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10746 DAG.getConstant(MaskVal, dl, MVT::i32));
10747
10748 // Replace the existing compare operand with the properly zero-extended one.
10749 SmallVector<SDValue, 4> Ops;
10750 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10751 Ops.push_back(AtomicNode->getOperand(i));
10752 Ops[2] = NewCmpOp;
10753 MachineMemOperand *MMO = AtomicNode->getMemOperand();
10754 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10755 auto NodeTy =
10756 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10757 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10758 }
10759
10760 SDValue PPCTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
10761 SelectionDAG &DAG) const {
10762 AtomicSDNode *N = cast<AtomicSDNode>(Op.getNode());
10763 EVT MemVT = N->getMemoryVT();
10764 assert(MemVT.getSimpleVT() == MVT::i128 &&
10765 "Expect quadword atomic operations");
10766 SDLoc dl(N);
10767 unsigned Opc = N->getOpcode();
10768 switch (Opc) {
10769 case ISD::ATOMIC_LOAD: {
10770 // Lower a quadword atomic load to int_ppc_atomic_load_i128, which will be
10771 // lowered to PPC instructions by the pattern-matching instruction selector.
10772 SDVTList Tys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
10773 SmallVector<SDValue, 4> Ops{
10774 N->getOperand(0),
10775 DAG.getConstant(Intrinsic::ppc_atomic_load_i128, dl, MVT::i32)};
10776 for (int I = 1, E = N->getNumOperands(); I < E; ++I)
10777 Ops.push_back(N->getOperand(I));
10778 SDValue LoadedVal = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, Tys,
10779 Ops, MemVT, N->getMemOperand());
10780 SDValue ValLo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i128, LoadedVal);
10781 SDValue ValHi =
10782 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i128, LoadedVal.getValue(1));
10783 ValHi = DAG.getNode(ISD::SHL, dl, MVT::i128, ValHi,
10784 DAG.getConstant(64, dl, MVT::i32));
10785 SDValue Val =
10786 DAG.getNode(ISD::OR, dl, {MVT::i128, MVT::Other}, {ValLo, ValHi});
10787 return DAG.getNode(ISD::MERGE_VALUES, dl, {MVT::i128, MVT::Other},
10788 {Val, LoadedVal.getValue(2)});
10789 }
10790 case ISD::ATOMIC_STORE: {
10791 // Lower a quadword atomic store to int_ppc_atomic_store_i128, which will be
10792 // lowered to PPC instructions by the pattern-matching instruction selector.
10793 SDVTList Tys = DAG.getVTList(MVT::Other);
10794 SmallVector<SDValue, 4> Ops{
10795 N->getOperand(0),
10796 DAG.getConstant(Intrinsic::ppc_atomic_store_i128, dl, MVT::i32)};
10797 SDValue Val = N->getOperand(2);
10798 SDValue ValLo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i64, Val);
10799 SDValue ValHi = DAG.getNode(ISD::SRL, dl, MVT::i128, Val,
10800 DAG.getConstant(64, dl, MVT::i32));
10801 ValHi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i64, ValHi);
10802 Ops.push_back(ValLo);
10803 Ops.push_back(ValHi);
10804 Ops.push_back(N->getOperand(1));
10805 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, dl, Tys, Ops, MemVT,
10806 N->getMemOperand());
10807 }
10808 default:
10809 llvm_unreachable("Unexpected atomic opcode");
10810 }
10811 }
10812
10813 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10814 SelectionDAG &DAG) const {
10815 SDLoc dl(Op);
10816 // Create a stack slot that is 16-byte aligned.
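  // In effect, SCALAR_TO_VECTOR is lowered as a store/reload: the scalar is
  // stored to the slot and the full 16 bytes are loaded back as the vector,
  // with all lanes other than the one holding the scalar left unspecified.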
10817 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10818 int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10819 EVT PtrVT = getPointerTy(DAG.getDataLayout());
10820 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10821
10822 // Store the input value into Value#0 of the stack slot.
10823 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10824 MachinePointerInfo());
10825 // Load it out.
10826 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10827 }
10828
10829 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10830 SelectionDAG &DAG) const {
10831 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10832 "Should only be called for ISD::INSERT_VECTOR_ELT");
10833
10834 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10835
10836 EVT VT = Op.getValueType();
10837 SDLoc dl(Op);
10838 SDValue V1 = Op.getOperand(0);
10839 SDValue V2 = Op.getOperand(1);
10840
10841 if (VT == MVT::v2f64 && C)
10842 return Op;
10843
10844 if (Subtarget.hasP9Vector()) {
10845 // An f32 load feeding into a v4f32 insert_vector_elt is handled this way
10846 // because on P10 it allows this specific insert_vector_elt load pattern to
10847 // use the refactored load and store infrastructure and thereby exploit
10848 // prefixed loads.
10849 // On targets with inexpensive direct moves (Power9 and up), a
10850 // (insert_vector_elt v4f32:$vec, (f32 load)) is always better done as an
10851 // integer load, since a single-precision load involves a conversion to
10852 // double precision on load and another conversion back to single precision.
10853 if ((VT == MVT::v4f32) && (V2.getValueType() == MVT::f32) &&
10854 (isa<LoadSDNode>(V2))) {
10855 SDValue BitcastVector = DAG.getBitcast(MVT::v4i32, V1);
10856 SDValue BitcastLoad = DAG.getBitcast(MVT::i32, V2);
10857 SDValue InsVecElt =
10858 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4i32, BitcastVector,
10859 BitcastLoad, Op.getOperand(2));
10860 return DAG.getBitcast(MVT::v4f32, InsVecElt);
10861 }
10862 }
10863
10864 if (Subtarget.isISA3_1()) {
10865 if ((VT == MVT::v2i64 || VT == MVT::v2f64) && !Subtarget.isPPC64())
10866 return SDValue();
10867 // On P10, we have legal lowering for constant and variable indices for
10868 // all vectors.
10869 if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
10870 VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64)
10871 return Op;
10872 }
10873
10874 // Before P10, we have legal lowering for constant indices but not for
10875 // variable ones.
10876 if (!C)
10877 return SDValue();
10878
10879 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
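  // For example, inserting element 1 of a v8i16 targets byte offset 2 on a
  // big-endian target; on little-endian the offset is mirrored to
  // (16 - 2) - 2 = 12, matching the computation below.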
10880 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 10881 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2); 10882 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8; 10883 unsigned InsertAtElement = C->getZExtValue(); 10884 unsigned InsertAtByte = InsertAtElement * BytesInEachElement; 10885 if (Subtarget.isLittleEndian()) { 10886 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte; 10887 } 10888 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz, 10889 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 10890 } 10891 return Op; 10892 } 10893 10894 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 10895 SelectionDAG &DAG) const { 10896 SDLoc dl(Op); 10897 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 10898 SDValue LoadChain = LN->getChain(); 10899 SDValue BasePtr = LN->getBasePtr(); 10900 EVT VT = Op.getValueType(); 10901 10902 if (VT != MVT::v256i1 && VT != MVT::v512i1) 10903 return Op; 10904 10905 // Type v256i1 is used for pairs and v512i1 is used for accumulators. 10906 // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value in 10907 // 2 or 4 vsx registers. 10908 assert((VT != MVT::v512i1 || Subtarget.hasMMA()) && 10909 "Type unsupported without MMA"); 10910 assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) && 10911 "Type unsupported without paired vector support"); 10912 Align Alignment = LN->getAlign(); 10913 SmallVector<SDValue, 4> Loads; 10914 SmallVector<SDValue, 4> LoadChains; 10915 unsigned NumVecs = VT.getSizeInBits() / 128; 10916 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) { 10917 SDValue Load = 10918 DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr, 10919 LN->getPointerInfo().getWithOffset(Idx * 16), 10920 commonAlignment(Alignment, Idx * 16), 10921 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10922 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10923 DAG.getConstant(16, dl, BasePtr.getValueType())); 10924 Loads.push_back(Load); 10925 LoadChains.push_back(Load.getValue(1)); 10926 } 10927 if (Subtarget.isLittleEndian()) { 10928 std::reverse(Loads.begin(), Loads.end()); 10929 std::reverse(LoadChains.begin(), LoadChains.end()); 10930 } 10931 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 10932 SDValue Value = 10933 DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD, 10934 dl, VT, Loads); 10935 SDValue RetOps[] = {Value, TF}; 10936 return DAG.getMergeValues(RetOps, dl); 10937 } 10938 10939 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 10940 SelectionDAG &DAG) const { 10941 SDLoc dl(Op); 10942 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 10943 SDValue StoreChain = SN->getChain(); 10944 SDValue BasePtr = SN->getBasePtr(); 10945 SDValue Value = SN->getValue(); 10946 EVT StoreVT = Value.getValueType(); 10947 10948 if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1) 10949 return Op; 10950 10951 // Type v256i1 is used for pairs and v512i1 is used for accumulators. 10952 // Here we create 2 or 4 v16i8 stores to store the pair or accumulator 10953 // underlying registers individually. 
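  // For a v512i1 accumulator this emits four 16-byte stores at offsets 0, 16,
  // 32 and 48; on little-endian targets the registers are extracted in
  // reverse order, mirroring the reversal done in LowerVectorLoad above.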
10954 assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) && 10955 "Type unsupported without MMA"); 10956 assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) && 10957 "Type unsupported without paired vector support"); 10958 Align Alignment = SN->getAlign(); 10959 SmallVector<SDValue, 4> Stores; 10960 unsigned NumVecs = 2; 10961 if (StoreVT == MVT::v512i1) { 10962 Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value); 10963 NumVecs = 4; 10964 } 10965 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) { 10966 unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx; 10967 SDValue Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value, 10968 DAG.getConstant(VecNum, dl, getPointerTy(DAG.getDataLayout()))); 10969 SDValue Store = 10970 DAG.getStore(StoreChain, dl, Elt, BasePtr, 10971 SN->getPointerInfo().getWithOffset(Idx * 16), 10972 commonAlignment(Alignment, Idx * 16), 10973 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 10974 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10975 DAG.getConstant(16, dl, BasePtr.getValueType())); 10976 Stores.push_back(Store); 10977 } 10978 SDValue TF = DAG.getTokenFactor(dl, Stores); 10979 return TF; 10980 } 10981 10982 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 10983 SDLoc dl(Op); 10984 if (Op.getValueType() == MVT::v4i32) { 10985 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10986 10987 SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl); 10988 // +16 as shift amt. 10989 SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl); 10990 SDValue RHSSwap = // = vrlw RHS, 16 10991 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 10992 10993 // Shrinkify inputs to v8i16. 10994 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 10995 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 10996 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 10997 10998 // Low parts multiplied together, generating 32-bit results (we ignore the 10999 // top parts). 11000 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 11001 LHS, RHS, DAG, dl, MVT::v4i32); 11002 11003 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 11004 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 11005 // Shift the high parts up 16 bits. 11006 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 11007 Neg16, DAG, dl); 11008 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 11009 } else if (Op.getValueType() == MVT::v16i8) { 11010 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 11011 bool isLittleEndian = Subtarget.isLittleEndian(); 11012 11013 // Multiply the even 8-bit parts, producing 16-bit sums. 11014 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 11015 LHS, RHS, DAG, dl, MVT::v8i16); 11016 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 11017 11018 // Multiply the odd 8-bit parts, producing 16-bit sums. 11019 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 11020 LHS, RHS, DAG, dl, MVT::v8i16); 11021 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 11022 11023 // Merge the results together. Because vmuleub and vmuloub are 11024 // instructions with a big-endian bias, we must reverse the 11025 // element numbering and reverse the meaning of "odd" and "even" 11026 // when generating little endian code. 
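    // Concretely, the merge mask built below is <1, 17, 3, 19, ...> when
    // compiling big-endian and <0, 16, 2, 18, ...> (with the shuffle operands
    // swapped) when compiling little-endian.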
11027 int Ops[16];
11028 for (unsigned i = 0; i != 8; ++i) {
11029 if (isLittleEndian) {
11030 Ops[i*2 ] = 2*i;
11031 Ops[i*2+1] = 2*i+16;
11032 } else {
11033 Ops[i*2 ] = 2*i+1;
11034 Ops[i*2+1] = 2*i+1+16;
11035 }
11036 }
11037 if (isLittleEndian)
11038 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
11039 else
11040 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
11041 } else {
11042 llvm_unreachable("Unknown mul to lower!");
11043 }
11044 }
11045
11046 SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
11047 bool IsStrict = Op->isStrictFPOpcode();
11048 if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
11049 !Subtarget.hasP9Vector())
11050 return SDValue();
11051
11052 return Op;
11053 }
11054
11055 // Custom lowering for fpext v2f32 to v2f64.
11056 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
11057
11058 assert(Op.getOpcode() == ISD::FP_EXTEND &&
11059 "Should only be called for ISD::FP_EXTEND");
11060
11061 // FIXME: handle extends from half precision float vectors on P9.
11062 // We only want to custom lower an extend from v2f32 to v2f64.
11063 if (Op.getValueType() != MVT::v2f64 ||
11064 Op.getOperand(0).getValueType() != MVT::v2f32)
11065 return SDValue();
11066
11067 SDLoc dl(Op);
11068 SDValue Op0 = Op.getOperand(0);
11069
11070 switch (Op0.getOpcode()) {
11071 default:
11072 return SDValue();
11073 case ISD::EXTRACT_SUBVECTOR: {
11074 assert(Op0.getNumOperands() == 2 &&
11075 isa<ConstantSDNode>(Op0->getOperand(1)) &&
11076 "Node should have 2 operands with second one being a constant!");
11077
11078 if (Op0.getOperand(0).getValueType() != MVT::v4f32)
11079 return SDValue();
11080
11081 // Custom lowering is only done for the high or low doubleword.
11082 int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
11083 if (Idx % 2 != 0)
11084 return SDValue();
11085
11086 // Since the input is v4f32, at this point Idx is either 0 or 2.
11087 // Shift to get the doubleword position we want.
11088 int DWord = Idx >> 1;
11089
11090 // High and low word positions are different on little endian.
11091 if (Subtarget.isLittleEndian())
11092 DWord ^= 0x1;
11093
11094 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
11095 Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
11096 }
11097 case ISD::FADD:
11098 case ISD::FMUL:
11099 case ISD::FSUB: {
11100 SDValue NewLoad[2];
11101 for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
11102 // Ensure both inputs are loads.
11103 SDValue LdOp = Op0.getOperand(i);
11104 if (LdOp.getOpcode() != ISD::LOAD)
11105 return SDValue();
11106 // Generate a new load node.
11107 LoadSDNode *LD = cast<LoadSDNode>(LdOp);
11108 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
11109 NewLoad[i] = DAG.getMemIntrinsicNode(
11110 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11111 LD->getMemoryVT(), LD->getMemOperand());
11112 }
11113 SDValue NewOp =
11114 DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
11115 NewLoad[1], Op0.getNode()->getFlags());
11116 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
11117 DAG.getConstant(0, dl, MVT::i32));
11118 }
11119 case ISD::LOAD: {
11120 LoadSDNode *LD = cast<LoadSDNode>(Op0);
11121 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
11122 SDValue NewLd = DAG.getMemIntrinsicNode(
11123 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11124 LD->getMemoryVT(), LD->getMemOperand());
11125 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
11126 DAG.getConstant(0, dl, MVT::i32));
11127 }
11128 }
11129 llvm_unreachable("ERROR: Should return for all cases within switch.");
11130 }
11131
11132 /// LowerOperation - Provide custom lowering hooks for some operations.
11133 ///
11134 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
11135 switch (Op.getOpcode()) {
11136 default: llvm_unreachable("Wasn't expecting to be able to lower this!");
11137 case ISD::FPOW: return lowerPow(Op, DAG);
11138 case ISD::FSIN: return lowerSin(Op, DAG);
11139 case ISD::FCOS: return lowerCos(Op, DAG);
11140 case ISD::FLOG: return lowerLog(Op, DAG);
11141 case ISD::FLOG10: return lowerLog10(Op, DAG);
11142 case ISD::FEXP: return lowerExp(Op, DAG);
11143 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
11144 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
11145 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
11146 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
11147 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
11148 case ISD::STRICT_FSETCC:
11149 case ISD::STRICT_FSETCCS:
11150 case ISD::SETCC: return LowerSETCC(Op, DAG);
11151 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
11152 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
11153
11154 case ISD::INLINEASM:
11155 case ISD::INLINEASM_BR: return LowerINLINEASM(Op, DAG);
11156 // Variable argument lowering.
11157 case ISD::VASTART: return LowerVASTART(Op, DAG);
11158 case ISD::VAARG: return LowerVAARG(Op, DAG);
11159 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
11160
11161 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG);
11162 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
11163 case ISD::GET_DYNAMIC_AREA_OFFSET:
11164 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
11165
11166 // Exception handling lowering.
11167 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG); 11168 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 11169 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 11170 11171 case ISD::LOAD: return LowerLOAD(Op, DAG); 11172 case ISD::STORE: return LowerSTORE(Op, DAG); 11173 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 11174 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 11175 case ISD::STRICT_FP_TO_UINT: 11176 case ISD::STRICT_FP_TO_SINT: 11177 case ISD::FP_TO_UINT: 11178 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op)); 11179 case ISD::STRICT_UINT_TO_FP: 11180 case ISD::STRICT_SINT_TO_FP: 11181 case ISD::UINT_TO_FP: 11182 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 11183 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 11184 11185 // Lower 64-bit shifts. 11186 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 11187 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 11188 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 11189 11190 case ISD::FSHL: return LowerFunnelShift(Op, DAG); 11191 case ISD::FSHR: return LowerFunnelShift(Op, DAG); 11192 11193 // Vector-related lowering. 11194 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 11195 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 11196 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 11197 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 11198 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 11199 case ISD::MUL: return LowerMUL(Op, DAG); 11200 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 11201 case ISD::STRICT_FP_ROUND: 11202 case ISD::FP_ROUND: 11203 return LowerFP_ROUND(Op, DAG); 11204 case ISD::ROTL: return LowerROTL(Op, DAG); 11205 11206 // For counter-based loop handling. 11207 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 11208 11209 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 11210 11211 // Frame & Return address. 
11212 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 11213 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 11214 11215 case ISD::INTRINSIC_VOID: 11216 return LowerINTRINSIC_VOID(Op, DAG); 11217 case ISD::BSWAP: 11218 return LowerBSWAP(Op, DAG); 11219 case ISD::ATOMIC_CMP_SWAP: 11220 return LowerATOMIC_CMP_SWAP(Op, DAG); 11221 case ISD::ATOMIC_STORE: 11222 return LowerATOMIC_LOAD_STORE(Op, DAG); 11223 } 11224 } 11225 11226 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 11227 SmallVectorImpl<SDValue>&Results, 11228 SelectionDAG &DAG) const { 11229 SDLoc dl(N); 11230 switch (N->getOpcode()) { 11231 default: 11232 llvm_unreachable("Do not know how to custom type legalize this operation!"); 11233 case ISD::ATOMIC_LOAD: { 11234 SDValue Res = LowerATOMIC_LOAD_STORE(SDValue(N, 0), DAG); 11235 Results.push_back(Res); 11236 Results.push_back(Res.getValue(1)); 11237 break; 11238 } 11239 case ISD::READCYCLECOUNTER: { 11240 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 11241 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 11242 11243 Results.push_back( 11244 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1))); 11245 Results.push_back(RTB.getValue(2)); 11246 break; 11247 } 11248 case ISD::INTRINSIC_W_CHAIN: { 11249 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 11250 Intrinsic::loop_decrement) 11251 break; 11252 11253 assert(N->getValueType(0) == MVT::i1 && 11254 "Unexpected result type for CTR decrement intrinsic"); 11255 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 11256 N->getValueType(0)); 11257 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 11258 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 11259 N->getOperand(1)); 11260 11261 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt)); 11262 Results.push_back(NewInt.getValue(1)); 11263 break; 11264 } 11265 case ISD::INTRINSIC_WO_CHAIN: { 11266 switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) { 11267 case Intrinsic::ppc_pack_longdouble: 11268 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 11269 N->getOperand(2), N->getOperand(1))); 11270 break; 11271 case Intrinsic::ppc_maxfe: 11272 case Intrinsic::ppc_minfe: 11273 case Intrinsic::ppc_fnmsub: 11274 case Intrinsic::ppc_convert_f128_to_ppcf128: 11275 Results.push_back(LowerINTRINSIC_WO_CHAIN(SDValue(N, 0), DAG)); 11276 break; 11277 } 11278 break; 11279 } 11280 case ISD::VAARG: { 11281 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 11282 return; 11283 11284 EVT VT = N->getValueType(0); 11285 11286 if (VT == MVT::i64) { 11287 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG); 11288 11289 Results.push_back(NewNode); 11290 Results.push_back(NewNode.getValue(1)); 11291 } 11292 return; 11293 } 11294 case ISD::STRICT_FP_TO_SINT: 11295 case ISD::STRICT_FP_TO_UINT: 11296 case ISD::FP_TO_SINT: 11297 case ISD::FP_TO_UINT: { 11298 // LowerFP_TO_INT() can only handle f32 and f64. 11299 if (N->getOperand(N->isStrictFPOpcode() ? 
1 : 0).getValueType() ==
11300 MVT::ppcf128)
11301 return;
11302 SDValue LoweredValue = LowerFP_TO_INT(SDValue(N, 0), DAG, dl);
11303 Results.push_back(LoweredValue);
11304 if (N->isStrictFPOpcode())
11305 Results.push_back(LoweredValue.getValue(1));
11306 return;
11307 }
11308 case ISD::TRUNCATE: {
11309 if (!N->getValueType(0).isVector())
11310 return;
11311 SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
11312 if (Lowered)
11313 Results.push_back(Lowered);
11314 return;
11315 }
11316 case ISD::FSHL:
11317 case ISD::FSHR:
11318 // Don't handle funnel shifts here.
11319 return;
11320 case ISD::BITCAST:
11321 // Don't handle bitcast here.
11322 return;
11323 case ISD::FP_EXTEND:
11324 SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
11325 if (Lowered)
11326 Results.push_back(Lowered);
11327 return;
11328 }
11329 }
11330
11331 //===----------------------------------------------------------------------===//
11332 // Other Lowering Code
11333 //===----------------------------------------------------------------------===//
11334
11335 static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) {
11336 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
11337 Function *Func = Intrinsic::getDeclaration(M, Id);
11338 return Builder.CreateCall(Func, {});
11339 }
11340
11341 // The mappings for emitLeading/TrailingFence are taken from
11342 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
11343 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
11344 Instruction *Inst,
11345 AtomicOrdering Ord) const {
11346 if (Ord == AtomicOrdering::SequentiallyConsistent)
11347 return callIntrinsic(Builder, Intrinsic::ppc_sync);
11348 if (isReleaseOrStronger(Ord))
11349 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11350 return nullptr;
11351 }
11352
11353 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
11354 Instruction *Inst,
11355 AtomicOrdering Ord) const {
11356 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
11357 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
11358 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
11359 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
11360 if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
11361 return Builder.CreateCall(
11362 Intrinsic::getDeclaration(
11363 Builder.GetInsertBlock()->getParent()->getParent(),
11364 Intrinsic::ppc_cfence, {Inst->getType()}),
11365 {Inst});
11366 // FIXME: Can use isync for rmw operations.
11367 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11368 }
11369 return nullptr;
11370 }
11371
11372 MachineBasicBlock *
11373 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
11374 unsigned AtomicSize,
11375 unsigned BinOpcode,
11376 unsigned CmpOpcode,
11377 unsigned CmpPred) const {
11378 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
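  // For the swap case there is no arithmetic to perform, so TmpReg below is
  // simply the incoming value and the st[wd]cx. stores it back directly.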
11379 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11380 11381 auto LoadMnemonic = PPC::LDARX; 11382 auto StoreMnemonic = PPC::STDCX; 11383 switch (AtomicSize) { 11384 default: 11385 llvm_unreachable("Unexpected size of atomic entity"); 11386 case 1: 11387 LoadMnemonic = PPC::LBARX; 11388 StoreMnemonic = PPC::STBCX; 11389 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 11390 break; 11391 case 2: 11392 LoadMnemonic = PPC::LHARX; 11393 StoreMnemonic = PPC::STHCX; 11394 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4"); 11395 break; 11396 case 4: 11397 LoadMnemonic = PPC::LWARX; 11398 StoreMnemonic = PPC::STWCX; 11399 break; 11400 case 8: 11401 LoadMnemonic = PPC::LDARX; 11402 StoreMnemonic = PPC::STDCX; 11403 break; 11404 } 11405 11406 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 11407 MachineFunction *F = BB->getParent(); 11408 MachineFunction::iterator It = ++BB->getIterator(); 11409 11410 Register dest = MI.getOperand(0).getReg(); 11411 Register ptrA = MI.getOperand(1).getReg(); 11412 Register ptrB = MI.getOperand(2).getReg(); 11413 Register incr = MI.getOperand(3).getReg(); 11414 DebugLoc dl = MI.getDebugLoc(); 11415 11416 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 11417 MachineBasicBlock *loop2MBB = 11418 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 11419 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 11420 F->insert(It, loopMBB); 11421 if (CmpOpcode) 11422 F->insert(It, loop2MBB); 11423 F->insert(It, exitMBB); 11424 exitMBB->splice(exitMBB->begin(), BB, 11425 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11426 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 11427 11428 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11429 Register TmpReg = (!BinOpcode) ? incr : 11430 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass 11431 : &PPC::GPRCRegClass); 11432 11433 // thisMBB: 11434 // ... 11435 // fallthrough --> loopMBB 11436 BB->addSuccessor(loopMBB); 11437 11438 // loopMBB: 11439 // l[wd]arx dest, ptr 11440 // add r0, dest, incr 11441 // st[wd]cx. r0, ptr 11442 // bne- loopMBB 11443 // fallthrough --> exitMBB 11444 11445 // For max/min... 11446 // loopMBB: 11447 // l[wd]arx dest, ptr 11448 // cmpl?[wd] incr, dest 11449 // bgt exitMBB 11450 // loop2MBB: 11451 // st[wd]cx. dest, ptr 11452 // bne- loopMBB 11453 // fallthrough --> exitMBB 11454 11455 BB = loopMBB; 11456 BuildMI(BB, dl, TII->get(LoadMnemonic), dest) 11457 .addReg(ptrA).addReg(ptrB); 11458 if (BinOpcode) 11459 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 11460 if (CmpOpcode) { 11461 // Signed comparisons of byte or halfword values must be sign-extended. 11462 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) { 11463 Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 11464 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? 
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  if (CmpOpcode) {
    // Signed comparisons of byte or halfword values must be sign-extended.
    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
              ExtReg).addReg(dest);
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
          .addReg(incr).addReg(ExtReg);
    } else
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
          .addReg(incr).addReg(dest);

    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(StoreMnemonic))
      .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}

static bool isSignExtended(MachineInstr &MI, const PPCInstrInfo *TII) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case PPC::COPY:
    return TII->isSignExtended(MI);
  case PPC::LHA:
  case PPC::LHA8:
  case PPC::LHAU:
  case PPC::LHAU8:
  case PPC::LHAUX:
  case PPC::LHAUX8:
  case PPC::LHAX:
  case PPC::LHAX8:
  case PPC::LWA:
  case PPC::LWAUX:
  case PPC::LWAX:
  case PPC::LWAX_32:
  case PPC::LWA_32:
  case PPC::PLHA:
  case PPC::PLHA8:
  case PPC::PLHA8pc:
  case PPC::PLHApc:
  case PPC::PLWA:
  case PPC::PLWA8:
  case PPC::PLWA8pc:
  case PPC::PLWApc:
  case PPC::EXTSB:
  case PPC::EXTSB8:
  case PPC::EXTSB8_32_64:
  case PPC::EXTSB8_rec:
  case PPC::EXTSB_rec:
  case PPC::EXTSH:
  case PPC::EXTSH8:
  case PPC::EXTSH8_32_64:
  case PPC::EXTSH8_rec:
  case PPC::EXTSH_rec:
  case PPC::EXTSW:
  case PPC::EXTSWSLI:
  case PPC::EXTSWSLI_32_64:
  case PPC::EXTSWSLI_32_64_rec:
  case PPC::EXTSWSLI_rec:
  case PPC::EXTSW_32:
  case PPC::EXTSW_32_64:
  case PPC::EXTSW_32_64_rec:
  case PPC::EXTSW_rec:
  case PPC::SRAW:
  case PPC::SRAWI:
  case PPC::SRAWI_rec:
  case PPC::SRAW_rec:
    return true;
  }
  return false;
}
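// A sketch of why the list above matters: for
//   %old = atomicrmw min i16* %p, i16 %v monotonic
// the comparison is a signed CMPW on a sub-word value, so the operand must be
// known to be sign-extended to 32 bits; if it is defined by, e.g., an LHA
// (load halfword algebraic), the extra EXTSH below can be skipped.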
MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
    MachineInstr &MI, MachineBasicBlock *BB,
    bool is8bit, // operation
    unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const PPCInstrInfo *TII = Subtarget.getInstrInfo();

  // If this is a signed comparison and the value being compared is not known
  // to be sign extended, sign extend it here.
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *F = BB->getParent();
  MachineRegisterInfo &RegInfo = F->getRegInfo();
  Register incr = MI.getOperand(3).getReg();
  bool IsSignExtended = Register::isVirtualRegister(incr) &&
                        isSignExtended(*RegInfo.getVRegDef(incr), TII);

  if (CmpOpcode == PPC::CMPW && !IsSignExtended) {
    Register ValueReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
    BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg)
        .addReg(MI.getOperand(3).getReg());
    MI.getOperand(3).setReg(ValueReg);
  }
  // If we support part-word atomic mnemonics, just use them.
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
                            CmpPred);

  // In 64-bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register ptrA = MI.getOperand(1).getReg();
  Register ptrB = MI.getOperand(2).getReg();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
      CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  const TargetRegisterClass *RC =
      is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register PtrReg = RegInfo.createVirtualRegister(RC);
  Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
  Register ShiftReg =
      isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
  Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
  Register MaskReg = RegInfo.createVirtualRegister(GPRC);
  Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
  Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
  Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
  Register SrwDestReg = RegInfo.createVirtualRegister(GPRC);
  Register Ptr1Reg;
  Register TmpReg =
      (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // The 4-byte load must be aligned, while a char or short may be
  // anywhere in the word.  Hence all this nasty bookkeeping code.
  //   add ptr1, ptrA, ptrB [copy if ptrA==0]
  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
  //   xori shift, shift1, 24 [16]
  //   rlwinm ptr, ptr1, 0, 0, 29
  //   slw incr2, incr, shift
  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
  //   slw mask, mask2, shift
  //  loopMBB:
  //   lwarx tmpDest, ptr
  //   add tmp, tmpDest, incr2
  //   andc tmp2, tmpDest, mask
  //   and tmp3, tmp, mask
  //   or tmp4, tmp3, tmp2
  //   stwcx. tmp4, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  //   srw SrwDest, tmpDest, shift
  //   rlwinm SrwDest, SrwDest, 0, 24 [16], 31
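  // Worked example (a sketch; big-endian, halfword case, ptr1 & 3 == 2):
  //   rlwinm shift1, ptr1, 3, 27, 27  -> shift1 = 16 (byte offset * 8)
  //   xori   shift,  shift1, 16       -> shift  = 0, since on BE the halfword
  //                                      at offset 2 occupies the low-order
  //                                      bits of the aligned word
  //   rlwinm ptr, ptr1, 0, 0, 29      -> ptr    = word-aligned address
  //   mask                            =  0xFFFF << shift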
  if (ptrA != ZeroReg) {
    Ptr1Reg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA)
        .addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }
  // We need to use a 32-bit subregister here to avoid a register-class
  // mismatch in 64-bit mode.
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
      .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
      .addImm(3)
      .addImm(27)
      .addImm(is8bit ? 28 : 27);
  if (!isLittleEndian)
    BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
        .addReg(Shift1Reg)
        .addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
        .addReg(Ptr1Reg)
        .addImm(0)
        .addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
        .addReg(Ptr1Reg)
        .addImm(0)
        .addImm(0)
        .addImm(29);
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
        .addReg(Mask3Reg)
        .addImm(65535);
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
      .addReg(Mask2Reg)
      .addReg(ShiftReg);

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
      .addReg(ZeroReg)
      .addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
        .addReg(Incr2Reg)
        .addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
      .addReg(TmpDestReg)
      .addReg(MaskReg);
  BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
  if (CmpOpcode) {
    // For unsigned comparisons, we can directly compare the shifted values.
    // For signed comparisons we shift and sign extend.
    Register SReg = RegInfo.createVirtualRegister(GPRC);
    BuildMI(BB, dl, TII->get(PPC::AND), SReg)
        .addReg(TmpDestReg)
        .addReg(MaskReg);
    unsigned ValueReg = SReg;
    unsigned CmpReg = Incr2Reg;
    if (CmpOpcode == PPC::CMPW) {
      ValueReg = RegInfo.createVirtualRegister(GPRC);
      BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
          .addReg(SReg)
          .addReg(ShiftReg);
      Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
      BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
          .addReg(ValueReg);
      ValueReg = ValueSReg;
      CmpReg = incr;
    }
    BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(CmpReg)
        .addReg(ValueReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(CmpPred)
        .addReg(PPC::CR0)
        .addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
  BuildMI(BB, dl, TII->get(PPC::STWCX))
      .addReg(Tmp4Reg)
      .addReg(ZeroReg)
      .addReg(PtrReg);
  BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE)
      .addReg(PPC::CR0)
      .addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  // Since the shift amount is not a constant, we need to clear
  // the upper bits with a separate RLWINM.
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::RLWINM), dest)
      .addReg(SrwDestReg)
      .addImm(0)
      .addImm(is8bit ? 24 : 16)
      .addImm(31);
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), SrwDestReg)
      .addReg(TmpDestReg)
      .addReg(ShiftReg);
  return BB;
}

llvm::MachineBasicBlock *
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be.  Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill.  Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and the stack address in the third.  Following
  // the X86 target code, we'll store the jump address in the second slot.
  // We also need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot.  The thread
  // identifier (R13) is not affected.

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset = 3 * PVT.getStoreSize();
  const int64_t BPOffset = 4 * PVT.getStoreSize();
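  // The resulting buffer layout (a sketch; 64-bit, PVT.getStoreSize() == 8):
  //   buf[0] (+0)  frame address (already stored by Clang)
  //   buf[1] (+8)  jump address / IP (stored below)
  //   buf[2] (+16) stack address (already stored by Clang)
  //   buf[3] (+24) TOC pointer (R2)
  //   buf[4] (+32) base pointer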
  // Prepare the IP in a register.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  Register LabelReg = MRI.createVirtualRegister(PtrRC);
  Register BufReg = MI.getOperand(1).getReg();

  if (Subtarget.is64BitELFABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
              .addReg(PPC::X2)
              .addImm(TOCOffset)
              .addReg(BufReg)
              .cloneMemRefs(MI);
  }

  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned BaseReg;
  if (MF->getFunction().hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
            .addReg(BaseReg)
            .addImm(BPOffset)
            .addReg(BufReg)
            .cloneMemRefs(MI);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
            .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
      BuildMI(mainMBB, DL,
              TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
      .addReg(mainDstReg).addMBB(mainMBB)
      .addReg(restoreDstReg).addMBB(thisMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
      (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  Register Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
      (PVT == MVT::i64)
          ? PPC::X30
          : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
                                                              : PPC::R30);

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset = 2 * PVT.getStoreSize();
  const int64_t TOCOffset = 3 * PVT.getStoreSize();
  const int64_t BPOffset = 4 * PVT.getStoreSize();

  Register BufReg = MI.getOperand(0).getReg();

  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, then its r31 will be restored
  // as necessary).
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
              .addImm(0)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
              .addImm(0)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload IP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
              .addImm(LabelOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
              .addImm(LabelOffset)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload SP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
              .addImm(SPOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
              .addImm(SPOffset)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload BP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
              .addImm(BPOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
              .addImm(BPOffset)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload TOC
  if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
              .addImm(TOCOffset)
              .addReg(BufReg)
              .cloneMemRefs(MI);
  }

  // Jump
  BuildMI(*MBB, MI, DL,
          TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
  BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));

  MI.eraseFromParent();
  return MBB;
}

bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";
  return false;
}

unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  unsigned StackAlign = TFI->getStackAlignment();
  assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
         "Unexpected stack alignment");
  // The default stack probe size is 4096 if the function has no
  // stack-probe-size attribute.
  unsigned StackProbeSize = 4096;
  const Function &Fn = MF.getFunction();
  if (Fn.hasFnAttribute("stack-probe-size"))
    Fn.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  // Round down to the stack alignment.
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
}

// Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
// into three phases. In the first phase, it uses the pseudo instruction
// PREPARE_PROBED_ALLOCA to get the future result of the actual FramePointer
// and FinalStackPtr. In the second phase, it generates a loop for probing
// blocks. At last, it uses the pseudo instruction DYNAREAOFFSET to get the
// future result of MaxCallFrameSize so that it can calculate the correct
// data area pointer.
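// For example (an illustrative sketch), a dynamic alloca in a function
// carrying the attributes
//   "probe-stack"="inline-asm" "stack-probe-size"="8192"
// is expanded here so that the stack is touched in 8192-byte steps.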
MachineBasicBlock *
PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  const bool isPPC64 = Subtarget.isPPC64();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  const unsigned ProbeSize = getStackProbeSize(*MF);
  const BasicBlock *ProbedBB = MBB->getBasicBlock();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  // The CFG for stack probing looks like this:
  //         +-----+
  //         | MBB |
  //         +--+--+
  //            |
  //       +----v----+
  //  +--->+ TestMBB +---+
  //  |    +----+----+   |
  //  |         |        |
  //  |   +-----v----+   |
  //  +---+ BlockMBB |   |
  //      +----------+   |
  //                     |
  //       +---------+   |
  //       | TailMBB +<--+
  //       +---------+
  // In MBB, calculate the previous frame pointer and the final stack pointer.
  // In TestMBB, test if sp is equal to the final stack pointer; if so, jump
  // to TailMBB. In BlockMBB, update the sp atomically and jump back to
  // TestMBB. TailMBB is spliced via \p MI.
  MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
  MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
  MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);

  MachineFunction::iterator MBBIter = ++MBB->getIterator();
  MF->insert(MBBIter, TestMBB);
  MF->insert(MBBIter, BlockMBB);
  MF->insert(MBBIter, TailMBB);

  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register DstReg = MI.getOperand(0).getReg();
  Register NegSizeReg = MI.getOperand(1).getReg();
  Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
  Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);

  // Since the value of NegSizeReg might be realigned during prologue/epilogue
  // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
  // actual FramePointer and NegSize.
  unsigned ProbeOpc;
  if (!MRI.hasOneNonDBGUse(NegSizeReg))
    ProbeOpc =
        isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
  else
    // By introducing PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG, ActualNegSizeReg
    // and NegSizeReg will be allocated to the same physical register to avoid
    // a redundant copy when NegSizeReg has only one use, which is the current
    // MI, and will be replaced by PREPARE_PROBED_ALLOCA then.
    ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
                       : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
  BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
      .addDef(ActualNegSizeReg)
      .addReg(NegSizeReg)
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));

  // Calculate the final stack pointer, which equals SP + ActualNegSize.
  BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
          FinalStackPtr)
      .addReg(SPReg)
      .addReg(ActualNegSizeReg);

  // Materialize a scratch register for update.
  int64_t NegProbeSize = -(int64_t)ProbeSize;
  assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
  Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  if (!isInt<16>(NegProbeSize)) {
    Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
        .addImm(NegProbeSize >> 16);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
            ScratchReg)
        .addReg(TempReg)
        .addImm(NegProbeSize & 0xFFFF);
  } else
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
        .addImm(NegProbeSize);

  {
    // Probe the leading residual part.
    Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
        .addReg(ActualNegSizeReg)
        .addReg(ScratchReg);
    Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
        .addReg(Div)
        .addReg(ScratchReg);
    Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
        .addReg(Mul)
        .addReg(ActualNegSizeReg);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
        .addReg(FramePointer)
        .addReg(SPReg)
        .addReg(NegMod);
  }

  {
    // The remaining part should be a multiple of ProbeSize.
    Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
        .addReg(SPReg)
        .addReg(FinalStackPtr);
    BuildMI(TestMBB, DL, TII->get(PPC::BCC))
        .addImm(PPC::PRED_EQ)
        .addReg(CmpResult)
        .addMBB(TailMBB);
    TestMBB->addSuccessor(BlockMBB);
    TestMBB->addSuccessor(TailMBB);
  }

  {
    // Touch the block.
    // |P...|P...|P...
    BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
        .addReg(FramePointer)
        .addReg(SPReg)
        .addReg(ScratchReg);
    BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
    BlockMBB->addSuccessor(TestMBB);
  }
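  // A sketch of the resulting probe loop (64-bit, ProbeSize = 4096; register
  // names illustrative):
  //   TestMBB:  cmpd  SP, FinalStackPtr
  //             beq   TailMBB
  //   BlockMBB: stdux FramePointer, SP, Scratch  ; SP -= 4096, touching the
  //             b     TestMBB                    ; new page on the way down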
  // Calculation of MaxCallFrameSize is deferred to prologue/epilogue
  // insertion; use the DYNAREAOFFSET pseudo instruction to get the future
  // result.
  Register MaxCallFrameSizeReg =
      MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  BuildMI(TailMBB, DL,
          TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
          MaxCallFrameSizeReg)
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
      .addReg(SPReg)
      .addReg(MaxCallFrameSizeReg);

  // Splice instructions after MI to TailMBB.
  TailMBB->splice(TailMBB->end(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
  MBB->addSuccessor(TestMBB);

  // Delete the pseudo instruction.
  MI.eraseFromParent();

  ++NumDynamicAllocaProbed;
  return TailMBB;
}

MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    if (Subtarget.is64BitELFABI() &&
        MI.getOpcode() == TargetOpcode::PATCHPOINT &&
        !Subtarget.isUsingPCRelativeCalls()) {
      // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't however, because there is no
      // way to mark the dependence as implicit there, and so the stackmap code
      // will confuse it with a regular operand. Instead, add the dependence
      // here.
      MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
    }

    return emitPatchPoint(MI, BB);
  }

  if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
      MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
    return emitEHSjLjSetJmp(MI, BB);
  } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
             MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
    return emitEHSjLjLongJmp(MI, BB);
  }

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  // To "insert" these instructions we actually have to insert their
  // control-flow patterns.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineFunction *F = BB->getParent();
  MachineRegisterInfo &MRI = F->getRegInfo();

  if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
      MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
      MI.getOpcode() == PPC::SELECT_I8) {
    SmallVector<MachineOperand, 2> Cond;
    if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
        MI.getOpcode() == PPC::SELECT_CC_I8)
      Cond.push_back(MI.getOperand(4));
    else
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
    Cond.push_back(MI.getOperand(1));

    DebugLoc dl = MI.getDebugLoc();
    TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
                      MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
  } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
             MI.getOpcode() == PPC::SELECT_CC_F8 ||
             MI.getOpcode() == PPC::SELECT_CC_F16 ||
             MI.getOpcode() == PPC::SELECT_CC_VRRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSRC ||
             MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
             MI.getOpcode() == PPC::SELECT_CC_SPE ||
             MI.getOpcode() == PPC::SELECT_F4 ||
             MI.getOpcode() == PPC::SELECT_F8 ||
             MI.getOpcode() == PPC::SELECT_F16 ||
             MI.getOpcode() == PPC::SELECT_SPE ||
             MI.getOpcode() == PPC::SELECT_SPE4 ||
             MI.getOpcode() == PPC::SELECT_VRRC ||
             MI.getOpcode() == PPC::SELECT_VSFRC ||
             MI.getOpcode() == PPC::SELECT_VSSRC ||
             MI.getOpcode() == PPC::SELECT_VSRC) {
    // The incoming instruction knows the destination vreg to set, the
    // condition code register to branch on, the true/false values to
    // select between, and a branch opcode to use.

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    // Next, add the true and fallthrough blocks as its successors.
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
        MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
        MI.getOpcode() == PPC::SELECT_F16 ||
        MI.getOpcode() == PPC::SELECT_SPE4 ||
        MI.getOpcode() == PPC::SELECT_SPE ||
        MI.getOpcode() == PPC::SELECT_VRRC ||
        MI.getOpcode() == PPC::SELECT_VSFRC ||
        MI.getOpcode() == PPC::SELECT_VSSRC ||
        MI.getOpcode() == PPC::SELECT_VSRC) {
      BuildMI(BB, dl, TII->get(PPC::BC))
          .addReg(MI.getOperand(1).getReg())
          .addMBB(sinkMBB);
    } else {
      unsigned SelectPred = MI.getOperand(4).getImm();
      BuildMI(BB, dl, TII->get(PPC::BCC))
          .addImm(SelectPred)
          .addReg(MI.getOperand(1).getReg())
          .addMBB(sinkMBB);
    }

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
        .addReg(MI.getOperand(3).getReg())
        .addMBB(copy0MBB)
        .addReg(MI.getOperand(2).getReg())
        .addMBB(thisMBB);
  } else if (MI.getOpcode() == PPC::ReadTB) {
    // To read the 64-bit time-base register on a 32-bit target, we read the
    // two halves. Should the counter have wrapped while it was being read, we
    // need to try again.
    // ...
    // readLoop:
    // mfspr Rx,TBU # load from TBU
    // mfspr Ry,TB # load from TB
    // mfspr Rz,TBU # load from TBU
    // cmpw crX,Rx,Rz # check if 'old'='new'
    // bne readLoop # branch if they're not equal
    // ...
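    // Note: the MFSPR immediates used below are the architected SPR numbers
    // for the time base: 269 = TBU (upper half), 268 = TBL (lower half).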
    MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, readMBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(readMBB);
    BB = readMBB;

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
    Register LoReg = MI.getOperand(0).getReg();
    Register HiReg = MI.getOperand(1).getReg();

    BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);

    Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);

    BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
        .addReg(HiReg)
        .addReg(ReadAgainReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(CmpReg)
        .addMBB(readMBB);

    BB->addSuccessor(readMBB);
    BB->addSuccessor(sinkMBB);
  } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
    bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    auto LoadMnemonic = PPC::LDARX;
    auto StoreMnemonic = PPC::STDCX;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Compare and swap of unknown size");
    case PPC::ATOMIC_CMP_SWAP_I8:
      LoadMnemonic = PPC::LBARX;
      StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "No support for partword atomics.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
    Register dest = MI.getOperand(0).getReg();
    Register ptrA = MI.getOperand(1).getReg();
    Register ptrB = MI.getOperand(2).getReg();
    Register oldval = MI.getOperand(3).getReg();
    Register newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    //  thisMBB:
    //   ...
    //   fallthrough --> loopMBB
    BB->addSuccessor(loop1MBB);

    //  loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    //  loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitBB
    //  midMBB:
    //   st[bhwd]cx. dest, ptr
    //  exitBB:
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
        .addReg(oldval)
        .addReg(dest);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(newval)
        .addReg(ptrA)
        .addReg(ptrB);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(dest)
        .addReg(ptrA)
        .addReg(ptrB);
    BB->addSuccessor(exitMBB);

    //  exitMBB:
    //   ...
    BB = exitMBB;
  } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
    // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them.  Other registers
    // can be 32-bit.
    bool is64bit = Subtarget.isPPC64();
    bool isLittleEndian = Subtarget.isLittleEndian();
    bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;

    Register dest = MI.getOperand(0).getReg();
    Register ptrA = MI.getOperand(1).getReg();
    Register ptrB = MI.getOperand(2).getReg();
    Register oldval = MI.getOperand(3).getReg();
    Register newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    const TargetRegisterClass *RC =
        is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
    const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

    Register PtrReg = RegInfo.createVirtualRegister(RC);
    Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
    Register ShiftReg =
        isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
    Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
    Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
    Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
    Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
    Register MaskReg = RegInfo.createVirtualRegister(GPRC);
    Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
    Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
    Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
    Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
    Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
    Register Ptr1Reg;
    Register TmpReg = RegInfo.createVirtualRegister(GPRC);
    Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
    //  thisMBB:
    //   ...
    //   fallthrough --> loopMBB
    BB->addSuccessor(loop1MBB);

    // The 4-byte load must be aligned, while a char or short may be
    // anywhere in the word.  Hence all this nasty bookkeeping code.
    //   add ptr1, ptrA, ptrB [copy if ptrA==0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    //  loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    //  loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitBB
    //  midMBB:
    //   stwcx. tmpDest, ptr
    //  exitBB:
    //   srw dest, tmpDest, shift
    if (ptrA != ZeroReg) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
          .addReg(ptrA)
          .addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }

    // We need to use a 32-bit subregister here to avoid a register-class
    // mismatch in 64-bit mode.
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
        .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
        .addImm(3)
        .addImm(27)
        .addImm(is8bit ? 28 : 27);
    if (!isLittleEndian)
      BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
          .addReg(Shift1Reg)
          .addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
          .addReg(Ptr1Reg)
          .addImm(0)
          .addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
          .addReg(Ptr1Reg)
          .addImm(0)
          .addImm(0)
          .addImm(29);
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
        .addReg(newval)
        .addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
        .addReg(oldval)
        .addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
          .addReg(Mask3Reg)
          .addImm(65535);
    }
    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
        .addReg(Mask2Reg)
        .addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
        .addReg(NewVal2Reg)
        .addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
        .addReg(OldVal2Reg)
        .addReg(MaskReg);

    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
        .addReg(ZeroReg)
        .addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
        .addReg(TmpDestReg)
        .addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
        .addReg(TmpReg)
        .addReg(OldVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
        .addReg(TmpDestReg)
        .addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
        .addReg(Tmp2Reg)
        .addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX))
        .addReg(Tmp4Reg)
        .addReg(ZeroReg)
        .addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX))
        .addReg(TmpDestReg)
        .addReg(ZeroReg)
        .addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    //  exitMBB:
    //   ...
    BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
        .addReg(TmpReg)
        .addReg(ShiftReg);
  } else if (MI.getOpcode() == PPC::FADDrtz) {
    // This pseudo performs an FADD with rounding mode temporarily forced
    // to round-to-zero.  We emit this via custom inserter since the FPSCR
    // is not modeled at the SelectionDAG level.
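    // A sketch of the sequence emitted below (register names illustrative):
    //   mffs   f0         ; save the FPSCR
    //   mtfsb1 31         ; }
    //   mtfsb0 30         ; } force rounding mode to round-toward-zero (01)
    //   fadd   fD, fA, fB
    //   mtfsf  1, f0      ; restore the rounding-mode field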
    Register Dest = MI.getOperand(0).getReg();
    Register Src1 = MI.getOperand(1).getReg();
    Register Src2 = MI.getOperand(2).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);

    // Set rounding mode to round-to-zero.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
        .addImm(31)
        .addReg(PPC::RM, RegState::ImplicitDefine);

    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
        .addImm(30)
        .addReg(PPC::RM, RegState::ImplicitDefine);

    // Perform addition.
    auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
                   .addReg(Src1)
                   .addReg(Src2);
    if (MI.getFlag(MachineInstr::NoFPExcept))
      MIB.setMIFlag(MachineInstr::NoFPExcept);

    // Restore FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
  } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
             MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
             MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
             MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
    unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
                       MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
                          ? PPC::ANDI8_rec
                          : PPC::ANDI_rec;
    bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
                 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register Dest = RegInfo.createVirtualRegister(
        Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);

    DebugLoc Dl = MI.getDebugLoc();
    BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
        .addReg(MI.getOperand(1).getReg())
        .addImm(1);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
  } else if (MI.getOpcode() == PPC::TCHECK_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(CRReg);
  } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    unsigned Imm = MI.getOperand(1).getImm();
    BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(PPC::CR0EQ);
  } else if (MI.getOpcode() == PPC::SETRNDi) {
    DebugLoc dl = MI.getDebugLoc();
    Register OldFPSCRReg = MI.getOperand(0).getReg();

    // Save FPSCR value.
    if (MRI.use_empty(OldFPSCRReg))
      BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), OldFPSCRReg);
    else
      BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);

    // The floating-point rounding mode is in bits 62:63 of the FPSCR, and has
    // the following settings:
    //   00 Round to nearest
    //   01 Round to zero
    //   10 Round to +inf
    //   11 Round to -inf

    // When the operand is an immediate, use its two least significant bits
    // to set bits 62:63 of the FPSCR.
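    // For example, SETRNDi with Mode = 2 (round to +inf, binary 10) emits
    // MTFSB0 31 (bit 63 := 0) followed by MTFSB1 30 (bit 62 := 1).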
12853 unsigned Mode = MI.getOperand(1).getImm(); 12854 BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0)) 12855 .addImm(31) 12856 .addReg(PPC::RM, RegState::ImplicitDefine); 12857 12858 BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0)) 12859 .addImm(30) 12860 .addReg(PPC::RM, RegState::ImplicitDefine); 12861 } else if (MI.getOpcode() == PPC::SETRND) { 12862 DebugLoc dl = MI.getDebugLoc(); 12863 12864 // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg 12865 // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg. 12866 // If the target doesn't have DirectMove, we should use stack to do the 12867 // conversion, because the target doesn't have the instructions like mtvsrd 12868 // or mfvsrd to do this conversion directly. 12869 auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) { 12870 if (Subtarget.hasDirectMove()) { 12871 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg) 12872 .addReg(SrcReg); 12873 } else { 12874 // Use stack to do the register copy. 12875 unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD; 12876 MachineRegisterInfo &RegInfo = F->getRegInfo(); 12877 const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg); 12878 if (RC == &PPC::F8RCRegClass) { 12879 // Copy register from F8RCRegClass to G8RCRegclass. 12880 assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) && 12881 "Unsupported RegClass."); 12882 12883 StoreOp = PPC::STFD; 12884 LoadOp = PPC::LD; 12885 } else { 12886 // Copy register from G8RCRegClass to F8RCRegclass. 12887 assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) && 12888 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) && 12889 "Unsupported RegClass."); 12890 } 12891 12892 MachineFrameInfo &MFI = F->getFrameInfo(); 12893 int FrameIdx = MFI.CreateStackObject(8, Align(8), false); 12894 12895 MachineMemOperand *MMOStore = F->getMachineMemOperand( 12896 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0), 12897 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx), 12898 MFI.getObjectAlign(FrameIdx)); 12899 12900 // Store the SrcReg into the stack. 12901 BuildMI(*BB, MI, dl, TII->get(StoreOp)) 12902 .addReg(SrcReg) 12903 .addImm(0) 12904 .addFrameIndex(FrameIdx) 12905 .addMemOperand(MMOStore); 12906 12907 MachineMemOperand *MMOLoad = F->getMachineMemOperand( 12908 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0), 12909 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx), 12910 MFI.getObjectAlign(FrameIdx)); 12911 12912 // Load from the stack where SrcReg is stored, and save to DestReg, 12913 // so we have done the RegClass conversion from RegClass::SrcReg to 12914 // RegClass::DestReg. 12915 BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg) 12916 .addImm(0) 12917 .addFrameIndex(FrameIdx) 12918 .addMemOperand(MMOLoad); 12919 } 12920 }; 12921 12922 Register OldFPSCRReg = MI.getOperand(0).getReg(); 12923 12924 // Save FPSCR value. 12925 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg); 12926 12927 // When the operand is gprc register, use two least significant bits of the 12928 // register and mtfsf instruction to set the bits 62:63 of FPSCR. 
12929 // 12930 // copy OldFPSCRTmpReg, OldFPSCRReg 12931 // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1) 12932 // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62 12933 // copy NewFPSCRReg, NewFPSCRTmpReg 12934 // mtfsf 255, NewFPSCRReg 12935 MachineOperand SrcOp = MI.getOperand(1); 12936 MachineRegisterInfo &RegInfo = F->getRegInfo(); 12937 Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 12938 12939 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg); 12940 12941 Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 12942 Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 12943 12944 // The first operand of INSERT_SUBREG should be a register which has 12945 // subregisters, we only care about its RegClass, so we should use an 12946 // IMPLICIT_DEF register. 12947 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg); 12948 BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg) 12949 .addReg(ImDefReg) 12950 .add(SrcOp) 12951 .addImm(1); 12952 12953 Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 12954 BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg) 12955 .addReg(OldFPSCRTmpReg) 12956 .addReg(ExtSrcReg) 12957 .addImm(0) 12958 .addImm(62); 12959 12960 Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 12961 copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg); 12962 12963 // The mask 255 means that put the 32:63 bits of NewFPSCRReg to the 32:63 12964 // bits of FPSCR. 12965 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)) 12966 .addImm(255) 12967 .addReg(NewFPSCRReg) 12968 .addImm(0) 12969 .addImm(0); 12970 } else if (MI.getOpcode() == PPC::SETFLM) { 12971 DebugLoc Dl = MI.getDebugLoc(); 12972 12973 // Result of setflm is previous FPSCR content, so we need to save it first. 12974 Register OldFPSCRReg = MI.getOperand(0).getReg(); 12975 if (MRI.use_empty(OldFPSCRReg)) 12976 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::IMPLICIT_DEF), OldFPSCRReg); 12977 else 12978 BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg); 12979 12980 // Put bits in 32:63 to FPSCR. 12981 Register NewFPSCRReg = MI.getOperand(1).getReg(); 12982 BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF)) 12983 .addImm(255) 12984 .addReg(NewFPSCRReg) 12985 .addImm(0) 12986 .addImm(0); 12987 } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 || 12988 MI.getOpcode() == PPC::PROBED_ALLOCA_64) { 12989 return emitProbedAlloca(MI, BB); 12990 } else if (MI.getOpcode() == PPC::SPLIT_QUADWORD) { 12991 DebugLoc DL = MI.getDebugLoc(); 12992 Register Src = MI.getOperand(2).getReg(); 12993 Register Lo = MI.getOperand(0).getReg(); 12994 Register Hi = MI.getOperand(1).getReg(); 12995 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY)) 12996 .addDef(Lo) 12997 .addUse(Src, 0, PPC::sub_gp8_x1); 12998 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY)) 12999 .addDef(Hi) 13000 .addUse(Src, 0, PPC::sub_gp8_x0); 13001 } else if (MI.getOpcode() == PPC::LQX_PSEUDO || 13002 MI.getOpcode() == PPC::STQX_PSEUDO) { 13003 DebugLoc DL = MI.getDebugLoc(); 13004 // Ptr is used as the ptr_rc_no_r0 part 13005 // of LQ/STQ's memory operand and adding result of RA and RB, 13006 // so it has to be g8rc_and_g8rc_nox0. 
13007 Register Ptr = 13008 F->getRegInfo().createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass); 13009 Register Val = MI.getOperand(0).getReg(); 13010 Register RA = MI.getOperand(1).getReg(); 13011 Register RB = MI.getOperand(2).getReg(); 13012 BuildMI(*BB, MI, DL, TII->get(PPC::ADD8), Ptr).addReg(RA).addReg(RB); 13013 BuildMI(*BB, MI, DL, 13014 MI.getOpcode() == PPC::LQX_PSEUDO ? TII->get(PPC::LQ) 13015 : TII->get(PPC::STQ)) 13016 .addReg(Val, MI.getOpcode() == PPC::LQX_PSEUDO ? RegState::Define : 0) 13017 .addImm(0) 13018 .addReg(Ptr); 13019 } else { 13020 llvm_unreachable("Unexpected instr type to insert"); 13021 } 13022 13023 MI.eraseFromParent(); // The pseudo instruction is gone now. 13024 return BB; 13025 } 13026 13027 //===----------------------------------------------------------------------===// 13028 // Target Optimization Hooks 13029 //===----------------------------------------------------------------------===// 13030 13031 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) { 13032 // For the estimates, convergence is quadratic, so we essentially double the 13033 // number of digits correct after every iteration. For both FRE and FRSQRTE, 13034 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(), 13035 // this is 2^-14. IEEE float has 23 digits and double has 52 digits. 13036 int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3; 13037 if (VT.getScalarType() == MVT::f64) 13038 RefinementSteps++; 13039 return RefinementSteps; 13040 } 13041 13042 SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG, 13043 const DenormalMode &Mode) const { 13044 // We only have VSX Vector Test for software Square Root. 13045 EVT VT = Op.getValueType(); 13046 if (!isTypeLegal(MVT::i1) || 13047 (VT != MVT::f64 && 13048 ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))) 13049 return TargetLowering::getSqrtInputTest(Op, DAG, Mode); 13050 13051 SDLoc DL(Op); 13052 // The output register of FTSQRT is CR field. 13053 SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op); 13054 // ftsqrt BF,FRB 13055 // Let e_b be the unbiased exponent of the double-precision 13056 // floating-point operand in register FRB. 13057 // fe_flag is set to 1 if either of the following conditions occurs. 13058 // - The double-precision floating-point operand in register FRB is a zero, 13059 // a NaN, or an infinity, or a negative value. 13060 // - e_b is less than or equal to -970. 13061 // Otherwise fe_flag is set to 0. 13062 // Both VSX and non-VSX versions would set EQ bit in the CR if the number is 13063 // not eligible for iteration. (zero/negative/infinity/nan or unbiased 13064 // exponent is less than -970) 13065 SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32); 13066 return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1, 13067 FTSQRT, SRIdxVal), 13068 0); 13069 } 13070 13071 SDValue 13072 PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op, 13073 SelectionDAG &DAG) const { 13074 // We only have VSX Vector Square Root. 
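// Note: for inputs flagged by getSqrtInputTest above, the generic expansion
// falls back to this full-precision square root (PPCISD::FSQRT, which for
// v2f64 maps to an instruction such as xvsqrtdp) rather than the FRSQRTE
// estimate plus Newton-Raphson refinement, which is not reliable for such
// inputs.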
13075 EVT VT = Op.getValueType(); 13076 if (VT != MVT::f64 && 13077 ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())) 13078 return TargetLowering::getSqrtResultForDenormInput(Op, DAG); 13079 13080 return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op); 13081 } 13082 13083 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, 13084 int Enabled, int &RefinementSteps, 13085 bool &UseOneConstNR, 13086 bool Reciprocal) const { 13087 EVT VT = Operand.getValueType(); 13088 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) || 13089 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) || 13090 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 13091 (VT == MVT::v2f64 && Subtarget.hasVSX())) { 13092 if (RefinementSteps == ReciprocalEstimate::Unspecified) 13093 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget); 13094 13095 // The Newton-Raphson computation with a single constant does not provide 13096 // enough accuracy on some CPUs. 13097 UseOneConstNR = !Subtarget.needsTwoConstNR(); 13098 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand); 13099 } 13100 return SDValue(); 13101 } 13102 13103 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG, 13104 int Enabled, 13105 int &RefinementSteps) const { 13106 EVT VT = Operand.getValueType(); 13107 if ((VT == MVT::f32 && Subtarget.hasFRES()) || 13108 (VT == MVT::f64 && Subtarget.hasFRE()) || 13109 (VT == MVT::v4f32 && Subtarget.hasAltivec()) || 13110 (VT == MVT::v2f64 && Subtarget.hasVSX())) { 13111 if (RefinementSteps == ReciprocalEstimate::Unspecified) 13112 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget); 13113 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand); 13114 } 13115 return SDValue(); 13116 } 13117 13118 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const { 13119 // Note: This functionality is used only when unsafe-fp-math is enabled, and 13120 // on cores with reciprocal estimates (which are used when unsafe-fp-math is 13121 // enabled for division), this functionality is redundant with the default 13122 // combiner logic (once the division -> reciprocal/multiply transformation 13123 // has taken place). As a result, this matters more for older cores than for 13124 // newer ones. 13125 13126 // Combine multiple FDIVs with the same divisor into multiple FMULs by the 13127 // reciprocal if there are two or more FDIVs (for embedded cores with only 13128 // one FP pipeline) or three or more FDIVs (for generic OOO cores). 13129 switch (Subtarget.getCPUDirective()) { 13130 default: 13131 return 3; 13132 case PPC::DIR_440: 13133 case PPC::DIR_A2: 13134 case PPC::DIR_E500: 13135 case PPC::DIR_E500mc: 13136 case PPC::DIR_E5500: 13137 return 2; 13138 } 13139 } 13140 13141 // isConsecutiveLSLoc needs to work even if all adds have not yet been 13142 // collapsed, and so we need to look through chains of them. 13143 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, 13144 int64_t& Offset, SelectionDAG &DAG) { 13145 if (DAG.isBaseWithConstantOffset(Loc)) { 13146 Base = Loc.getOperand(0); 13147 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue(); 13148 13149 // The base might itself be a base plus an offset, and if so, accumulate 13150 // that as well.
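// For example, given a chain like (add (add X, 16), 8), the recursion below
// ends with Base == X and Offset incremented by 24 (a sketch; any shape that
// DAG.isBaseWithConstantOffset recognizes is handled the same way).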
13151 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG); 13152 } 13153 } 13154 13155 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, 13156 unsigned Bytes, int Dist, 13157 SelectionDAG &DAG) { 13158 if (VT.getSizeInBits() / 8 != Bytes) 13159 return false; 13160 13161 SDValue BaseLoc = Base->getBasePtr(); 13162 if (Loc.getOpcode() == ISD::FrameIndex) { 13163 if (BaseLoc.getOpcode() != ISD::FrameIndex) 13164 return false; 13165 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 13166 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 13167 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 13168 int FS = MFI.getObjectSize(FI); 13169 int BFS = MFI.getObjectSize(BFI); 13170 if (FS != BFS || FS != (int)Bytes) return false; 13171 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 13172 } 13173 13174 SDValue Base1 = Loc, Base2 = BaseLoc; 13175 int64_t Offset1 = 0, Offset2 = 0; 13176 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); 13177 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); 13178 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) 13179 return true; 13180 13181 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 13182 const GlobalValue *GV1 = nullptr; 13183 const GlobalValue *GV2 = nullptr; 13184 Offset1 = 0; 13185 Offset2 = 0; 13186 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); 13187 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 13188 if (isGA1 && isGA2 && GV1 == GV2) 13189 return Offset1 == (Offset2 + Dist*Bytes); 13190 return false; 13191 } 13192 13193 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does 13194 // not enforce equality of the chain operands. 13195 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, 13196 unsigned Bytes, int Dist, 13197 SelectionDAG &DAG) { 13198 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) { 13199 EVT VT = LS->getMemoryVT(); 13200 SDValue Loc = LS->getBasePtr(); 13201 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG); 13202 } 13203 13204 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 13205 EVT VT; 13206 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 13207 default: return false; 13208 case Intrinsic::ppc_altivec_lvx: 13209 case Intrinsic::ppc_altivec_lvxl: 13210 case Intrinsic::ppc_vsx_lxvw4x: 13211 case Intrinsic::ppc_vsx_lxvw4x_be: 13212 VT = MVT::v4i32; 13213 break; 13214 case Intrinsic::ppc_vsx_lxvd2x: 13215 case Intrinsic::ppc_vsx_lxvd2x_be: 13216 VT = MVT::v2f64; 13217 break; 13218 case Intrinsic::ppc_altivec_lvebx: 13219 VT = MVT::i8; 13220 break; 13221 case Intrinsic::ppc_altivec_lvehx: 13222 VT = MVT::i16; 13223 break; 13224 case Intrinsic::ppc_altivec_lvewx: 13225 VT = MVT::i32; 13226 break; 13227 } 13228 13229 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG); 13230 } 13231 13232 if (N->getOpcode() == ISD::INTRINSIC_VOID) { 13233 EVT VT; 13234 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 13235 default: return false; 13236 case Intrinsic::ppc_altivec_stvx: 13237 case Intrinsic::ppc_altivec_stvxl: 13238 case Intrinsic::ppc_vsx_stxvw4x: 13239 VT = MVT::v4i32; 13240 break; 13241 case Intrinsic::ppc_vsx_stxvd2x: 13242 VT = MVT::v2f64; 13243 break; 13244 case Intrinsic::ppc_vsx_stxvw4x_be: 13245 VT = MVT::v4i32; 13246 break; 13247 case Intrinsic::ppc_vsx_stxvd2x_be: 13248 VT = MVT::v2f64; 13249 break; 13250 case Intrinsic::ppc_altivec_stvebx: 13251 VT = MVT::i8; 13252 break; 13253 case 
Intrinsic::ppc_altivec_stvehx: 13254 VT = MVT::i16; 13255 break; 13256 case Intrinsic::ppc_altivec_stvewx: 13257 VT = MVT::i32; 13258 break; 13259 } 13260 13261 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG); 13262 } 13263 13264 return false; 13265 } 13266 13267 // Return true if there is a nearby consecutive load to the one provided 13268 // (regardless of alignment). We search up and down the chain, looking through 13269 // token factors and other loads (but nothing else). As a result, a true result 13270 // indicates that it is safe to create a new consecutive load adjacent to the 13271 // load provided. 13272 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) { 13273 SDValue Chain = LD->getChain(); 13274 EVT VT = LD->getMemoryVT(); 13275 13276 SmallSet<SDNode *, 16> LoadRoots; 13277 SmallVector<SDNode *, 8> Queue(1, Chain.getNode()); 13278 SmallSet<SDNode *, 16> Visited; 13279 13280 // First, search up the chain, branching to follow all token-factor operands. 13281 // If we find a consecutive load, then we're done, otherwise, record all 13282 // nodes just above the top-level loads and token factors. 13283 while (!Queue.empty()) { 13284 SDNode *ChainNext = Queue.pop_back_val(); 13285 if (!Visited.insert(ChainNext).second) 13286 continue; 13287 13288 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) { 13289 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 13290 return true; 13291 13292 if (!Visited.count(ChainLD->getChain().getNode())) 13293 Queue.push_back(ChainLD->getChain().getNode()); 13294 } else if (ChainNext->getOpcode() == ISD::TokenFactor) { 13295 for (const SDUse &O : ChainNext->ops()) 13296 if (!Visited.count(O.getNode())) 13297 Queue.push_back(O.getNode()); 13298 } else 13299 LoadRoots.insert(ChainNext); 13300 } 13301 13302 // Second, search down the chain, starting from the top-level nodes recorded 13303 // in the first phase. These top-level nodes are the nodes just above all 13304 // loads and token factors. Starting with their uses, recursively look through 13305 // all loads (just the chain uses) and token factors to find a consecutive 13306 // load. 13307 Visited.clear(); 13308 Queue.clear(); 13309 13310 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(), 13311 IE = LoadRoots.end(); I != IE; ++I) { 13312 Queue.push_back(*I); 13313 13314 while (!Queue.empty()) { 13315 SDNode *LoadRoot = Queue.pop_back_val(); 13316 if (!Visited.insert(LoadRoot).second) 13317 continue; 13318 13319 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot)) 13320 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG)) 13321 return true; 13322 13323 for (SDNode *U : LoadRoot->uses()) 13324 if (((isa<MemSDNode>(U) && 13325 cast<MemSDNode>(U)->getChain().getNode() == LoadRoot) || 13326 U->getOpcode() == ISD::TokenFactor) && 13327 !Visited.count(U)) 13328 Queue.push_back(U); 13329 } 13330 } 13331 13332 return false; 13333 } 13334 13335 /// This function is called when we have proved that a SETCC node can be replaced 13336 /// by subtraction (and other supporting instructions) so that the result of the 13337 /// comparison is kept in a GPR instead of a CR. This function is purely for 13338 /// codegen purposes and has some flags to guide the codegen process. 13339 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, 13340 bool Swap, SDLoc &DL, SelectionDAG &DAG) { 13341 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 13342 13343 // Zero extend the operands to the largest legal integer.
Originally, they 13344 // must be of a strictly smaller size. 13345 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0), 13346 DAG.getConstant(Size, DL, MVT::i32)); 13347 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1), 13348 DAG.getConstant(Size, DL, MVT::i32)); 13349 13350 // Swap if needed. Depends on the condition code. 13351 if (Swap) 13352 std::swap(Op0, Op1); 13353 13354 // Subtract extended integers. 13355 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1); 13356 13357 // Move the sign bit to the least significant position and zero out the rest. 13358 // Now the least significant bit carries the result of original comparison. 13359 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode, 13360 DAG.getConstant(Size - 1, DL, MVT::i32)); 13361 auto Final = Shifted; 13362 13363 // Complement the result if needed. Based on the condition code. 13364 if (Complement) 13365 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted, 13366 DAG.getConstant(1, DL, MVT::i64)); 13367 13368 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final); 13369 } 13370 13371 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N, 13372 DAGCombinerInfo &DCI) const { 13373 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected."); 13374 13375 SelectionDAG &DAG = DCI.DAG; 13376 SDLoc DL(N); 13377 13378 // Size of integers being compared has a critical role in the following 13379 // analysis, so we prefer to do this when all types are legal. 13380 if (!DCI.isAfterLegalizeDAG()) 13381 return SDValue(); 13382 13383 // If all users of SETCC extend its value to a legal integer type 13384 // then we replace SETCC with a subtraction 13385 for (const SDNode *U : N->uses()) 13386 if (U->getOpcode() != ISD::ZERO_EXTEND) 13387 return SDValue(); 13388 13389 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 13390 auto OpSize = N->getOperand(0).getValueSizeInBits(); 13391 13392 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits(); 13393 13394 if (OpSize < Size) { 13395 switch (CC) { 13396 default: break; 13397 case ISD::SETULT: 13398 return generateEquivalentSub(N, Size, false, false, DL, DAG); 13399 case ISD::SETULE: 13400 return generateEquivalentSub(N, Size, true, true, DL, DAG); 13401 case ISD::SETUGT: 13402 return generateEquivalentSub(N, Size, false, true, DL, DAG); 13403 case ISD::SETUGE: 13404 return generateEquivalentSub(N, Size, true, false, DL, DAG); 13405 } 13406 } 13407 13408 return SDValue(); 13409 } 13410 13411 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N, 13412 DAGCombinerInfo &DCI) const { 13413 SelectionDAG &DAG = DCI.DAG; 13414 SDLoc dl(N); 13415 13416 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits"); 13417 // If we're tracking CR bits, we need to be careful that we don't have: 13418 // trunc(binary-ops(zext(x), zext(y))) 13419 // or 13420 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...) 13421 // such that we're unnecessarily moving things into GPRs when it would be 13422 // better to keep them in CR bits. 13423 13424 // Note that trunc here can be an actual i1 trunc, or can be the effective 13425 // truncation that comes from a setcc or select_cc. 
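// For example (a sketch with i1 values a and b): trunc(or(zext(a), zext(b)))
// can be rewritten as or(a, b) computed entirely in CR bits, avoiding a
// round-trip through a GPR.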
13426 if (N->getOpcode() == ISD::TRUNCATE && 13427 N->getValueType(0) != MVT::i1) 13428 return SDValue(); 13429 13430 if (N->getOperand(0).getValueType() != MVT::i32 && 13431 N->getOperand(0).getValueType() != MVT::i64) 13432 return SDValue(); 13433 13434 if (N->getOpcode() == ISD::SETCC || 13435 N->getOpcode() == ISD::SELECT_CC) { 13436 // If we're looking at a comparison, then we need to make sure that the 13437 // high bits (all except for the first) don't affect the result. 13438 ISD::CondCode CC = 13439 cast<CondCodeSDNode>(N->getOperand( 13440 N->getOpcode() == ISD::SETCC ? 2 : 4))->get(); 13441 unsigned OpBits = N->getOperand(0).getValueSizeInBits(); 13442 13443 if (ISD::isSignedIntSetCC(CC)) { 13444 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits || 13445 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits) 13446 return SDValue(); 13447 } else if (ISD::isUnsignedIntSetCC(CC)) { 13448 if (!DAG.MaskedValueIsZero(N->getOperand(0), 13449 APInt::getHighBitsSet(OpBits, OpBits-1)) || 13450 !DAG.MaskedValueIsZero(N->getOperand(1), 13451 APInt::getHighBitsSet(OpBits, OpBits-1))) 13452 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI) 13453 : SDValue()); 13454 } else { 13455 // This is neither a signed nor an unsigned comparison; just make sure 13456 // that the high bits are equal. 13457 KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0)); 13458 KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1)); 13459 13460 // We don't really care about what is known about the first bit (if 13461 // anything), so pretend that it is known zero for both to ensure they can 13462 // be compared as constants. 13463 Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0); 13464 Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0); 13465 13466 if (!Op1Known.isConstant() || !Op2Known.isConstant() || 13467 Op1Known.getConstant() != Op2Known.getConstant()) 13468 return SDValue(); 13469 } 13470 } 13471 13472 // We now know that the higher-order bits are irrelevant; we just need to 13473 // make sure that all of the intermediate operations are bit operations, and 13474 // all inputs are extensions.
13475 if (N->getOperand(0).getOpcode() != ISD::AND && 13476 N->getOperand(0).getOpcode() != ISD::OR && 13477 N->getOperand(0).getOpcode() != ISD::XOR && 13478 N->getOperand(0).getOpcode() != ISD::SELECT && 13479 N->getOperand(0).getOpcode() != ISD::SELECT_CC && 13480 N->getOperand(0).getOpcode() != ISD::TRUNCATE && 13481 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND && 13482 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && 13483 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND) 13484 return SDValue(); 13485 13486 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) && 13487 N->getOperand(1).getOpcode() != ISD::AND && 13488 N->getOperand(1).getOpcode() != ISD::OR && 13489 N->getOperand(1).getOpcode() != ISD::XOR && 13490 N->getOperand(1).getOpcode() != ISD::SELECT && 13491 N->getOperand(1).getOpcode() != ISD::SELECT_CC && 13492 N->getOperand(1).getOpcode() != ISD::TRUNCATE && 13493 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND && 13494 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND && 13495 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND) 13496 return SDValue(); 13497 13498 SmallVector<SDValue, 4> Inputs; 13499 SmallVector<SDValue, 8> BinOps, PromOps; 13500 SmallPtrSet<SDNode *, 16> Visited; 13501 13502 for (unsigned i = 0; i < 2; ++i) { 13503 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 13504 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 13505 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 13506 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) || 13507 isa<ConstantSDNode>(N->getOperand(i))) 13508 Inputs.push_back(N->getOperand(i)); 13509 else 13510 BinOps.push_back(N->getOperand(i)); 13511 13512 if (N->getOpcode() == ISD::TRUNCATE) 13513 break; 13514 } 13515 13516 // Visit all inputs, collect all binary operations (and, or, xor and 13517 // select) that are all fed by extensions. 13518 while (!BinOps.empty()) { 13519 SDValue BinOp = BinOps.pop_back_val(); 13520 13521 if (!Visited.insert(BinOp.getNode()).second) 13522 continue; 13523 13524 PromOps.push_back(BinOp); 13525 13526 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 13527 // The condition of the select is not promoted. 13528 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 13529 continue; 13530 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 13531 continue; 13532 13533 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 13534 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 13535 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) && 13536 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) || 13537 isa<ConstantSDNode>(BinOp.getOperand(i))) { 13538 Inputs.push_back(BinOp.getOperand(i)); 13539 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 13540 BinOp.getOperand(i).getOpcode() == ISD::OR || 13541 BinOp.getOperand(i).getOpcode() == ISD::XOR || 13542 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 13543 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC || 13544 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 13545 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND || 13546 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND || 13547 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) { 13548 BinOps.push_back(BinOp.getOperand(i)); 13549 } else { 13550 // We have an input that is not an extension or another binary 13551 // operation; we'll abort this transformation. 
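// (Such an input might be, for instance, an arithmetic node like ADD or a
// load — something this combine does not know how to promote — so the whole
// transformation is abandoned rather than risk miscompiling the cluster.)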
13552 return SDValue(); 13553 } 13554 } 13555 } 13556 13557 // Make sure that this is a self-contained cluster of operations (which 13558 // is not quite the same thing as saying that everything has only one 13559 // use). 13560 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 13561 if (isa<ConstantSDNode>(Inputs[i])) 13562 continue; 13563 13564 for (const SDNode *User : Inputs[i].getNode()->uses()) { 13565 if (User != N && !Visited.count(User)) 13566 return SDValue(); 13567 13568 // Make sure that we're not going to promote the non-output-value 13569 // operand(s) or SELECT or SELECT_CC. 13570 // FIXME: Although we could sometimes handle this, and it does occur in 13571 // practice that one of the condition inputs to the select is also one of 13572 // the outputs, we currently can't deal with this. 13573 if (User->getOpcode() == ISD::SELECT) { 13574 if (User->getOperand(0) == Inputs[i]) 13575 return SDValue(); 13576 } else if (User->getOpcode() == ISD::SELECT_CC) { 13577 if (User->getOperand(0) == Inputs[i] || 13578 User->getOperand(1) == Inputs[i]) 13579 return SDValue(); 13580 } 13581 } 13582 } 13583 13584 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 13585 for (const SDNode *User : PromOps[i].getNode()->uses()) { 13586 if (User != N && !Visited.count(User)) 13587 return SDValue(); 13588 13589 // Make sure that we're not going to promote the non-output-value 13590 // operand(s) or SELECT or SELECT_CC. 13591 // FIXME: Although we could sometimes handle this, and it does occur in 13592 // practice that one of the condition inputs to the select is also one of 13593 // the outputs, we currently can't deal with this. 13594 if (User->getOpcode() == ISD::SELECT) { 13595 if (User->getOperand(0) == PromOps[i]) 13596 return SDValue(); 13597 } else if (User->getOpcode() == ISD::SELECT_CC) { 13598 if (User->getOperand(0) == PromOps[i] || 13599 User->getOperand(1) == PromOps[i]) 13600 return SDValue(); 13601 } 13602 } 13603 } 13604 13605 // Replace all inputs with the extension operand. 13606 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 13607 // Constants may have users outside the cluster of to-be-promoted nodes, 13608 // and so we need to replace those as we do the promotions. 13609 if (isa<ConstantSDNode>(Inputs[i])) 13610 continue; 13611 else 13612 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0)); 13613 } 13614 13615 std::list<HandleSDNode> PromOpHandles; 13616 for (auto &PromOp : PromOps) 13617 PromOpHandles.emplace_back(PromOp); 13618 13619 // Replace all operations (these are all the same, but have a different 13620 // (i1) return type). DAG.getNode will validate that the types of 13621 // a binary operator match, so go through the list in reverse so that 13622 // we've likely promoted both operands first. Any intermediate truncations or 13623 // extensions disappear. 13624 while (!PromOpHandles.empty()) { 13625 SDValue PromOp = PromOpHandles.back().getValue(); 13626 PromOpHandles.pop_back(); 13627 13628 if (PromOp.getOpcode() == ISD::TRUNCATE || 13629 PromOp.getOpcode() == ISD::SIGN_EXTEND || 13630 PromOp.getOpcode() == ISD::ZERO_EXTEND || 13631 PromOp.getOpcode() == ISD::ANY_EXTEND) { 13632 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) && 13633 PromOp.getOperand(0).getValueType() != MVT::i1) { 13634 // The operand is not yet ready (see comment below). 
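// Re-queueing at the front works because we pop from the back: this node
// will be revisited only after the remaining queued nodes, which include
// the operations feeding it, have been processed.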
13635 PromOpHandles.emplace_front(PromOp); 13636 continue; 13637 } 13638 13639 SDValue RepValue = PromOp.getOperand(0); 13640 if (isa<ConstantSDNode>(RepValue)) 13641 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue); 13642 13643 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue); 13644 continue; 13645 } 13646 13647 unsigned C; 13648 switch (PromOp.getOpcode()) { 13649 default: C = 0; break; 13650 case ISD::SELECT: C = 1; break; 13651 case ISD::SELECT_CC: C = 2; break; 13652 } 13653 13654 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 13655 PromOp.getOperand(C).getValueType() != MVT::i1) || 13656 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 13657 PromOp.getOperand(C+1).getValueType() != MVT::i1)) { 13658 // The to-be-promoted operands of this node have not yet been 13659 // promoted (this should be rare because we're going through the 13660 // list backward, but if one of the operands has several users in 13661 // this cluster of to-be-promoted nodes, it is possible). 13662 PromOpHandles.emplace_front(PromOp); 13663 continue; 13664 } 13665 13666 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 13667 PromOp.getNode()->op_end()); 13668 13669 // If there are any constant inputs, make sure they're replaced now. 13670 for (unsigned i = 0; i < 2; ++i) 13671 if (isa<ConstantSDNode>(Ops[C+i])) 13672 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]); 13673 13674 DAG.ReplaceAllUsesOfValueWith(PromOp, 13675 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops)); 13676 } 13677 13678 // Now we're left with the initial truncation itself. 13679 if (N->getOpcode() == ISD::TRUNCATE) 13680 return N->getOperand(0); 13681 13682 // Otherwise, this is a comparison. The operands to be compared have just 13683 // changed type (to i1), but everything else is the same. 13684 return SDValue(N, 0); 13685 } 13686 13687 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, 13688 DAGCombinerInfo &DCI) const { 13689 SelectionDAG &DAG = DCI.DAG; 13690 SDLoc dl(N); 13691 13692 // If we're tracking CR bits, we need to be careful that we don't have: 13693 // zext(binary-ops(trunc(x), trunc(y))) 13694 // or 13695 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...) 13696 // such that we're unnecessarily moving things into CR bits that can more 13697 // efficiently stay in GPRs. Note that if we're not certain that the high 13698 // bits are set as required by the final extension, we still may need to do 13699 // some masking to get the proper behavior. 13700 13701 // This same functionality is important on PPC64 when dealing with 13702 // 32-to-64-bit extensions; these occur often when 32-bit values are used as 13703 // the return values of functions. Because it is so similar, it is handled 13704 // here as well. 
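// For example (a sketch of the 32-to-64-bit case): (zext (and (trunc x),
// (trunc y))) with x and y of type i64 can become (and x, y) on the original
// 64-bit values, with an explicit mask of the low 32 bits added at the end
// only when the inputs' high bits are not already zero.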
13705 13706 if (N->getValueType(0) != MVT::i32 && 13707 N->getValueType(0) != MVT::i64) 13708 return SDValue(); 13709 13710 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) || 13711 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64()))) 13712 return SDValue(); 13713 13714 if (N->getOperand(0).getOpcode() != ISD::AND && 13715 N->getOperand(0).getOpcode() != ISD::OR && 13716 N->getOperand(0).getOpcode() != ISD::XOR && 13717 N->getOperand(0).getOpcode() != ISD::SELECT && 13718 N->getOperand(0).getOpcode() != ISD::SELECT_CC) 13719 return SDValue(); 13720 13721 SmallVector<SDValue, 4> Inputs; 13722 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps; 13723 SmallPtrSet<SDNode *, 16> Visited; 13724 13725 // Visit all inputs, collect all binary operations (and, or, xor and 13726 // select) that are all fed by truncations. 13727 while (!BinOps.empty()) { 13728 SDValue BinOp = BinOps.pop_back_val(); 13729 13730 if (!Visited.insert(BinOp.getNode()).second) 13731 continue; 13732 13733 PromOps.push_back(BinOp); 13734 13735 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) { 13736 // The condition of the select is not promoted. 13737 if (BinOp.getOpcode() == ISD::SELECT && i == 0) 13738 continue; 13739 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3) 13740 continue; 13741 13742 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE || 13743 isa<ConstantSDNode>(BinOp.getOperand(i))) { 13744 Inputs.push_back(BinOp.getOperand(i)); 13745 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND || 13746 BinOp.getOperand(i).getOpcode() == ISD::OR || 13747 BinOp.getOperand(i).getOpcode() == ISD::XOR || 13748 BinOp.getOperand(i).getOpcode() == ISD::SELECT || 13749 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) { 13750 BinOps.push_back(BinOp.getOperand(i)); 13751 } else { 13752 // We have an input that is not a truncation or another binary 13753 // operation; we'll abort this transformation. 13754 return SDValue(); 13755 } 13756 } 13757 } 13758 13759 // The operands of a select that must be truncated when the select is 13760 // promoted because the operand is actually part of the to-be-promoted set. 13761 DenseMap<SDNode *, EVT> SelectTruncOp[2]; 13762 13763 // Make sure that this is a self-contained cluster of operations (which 13764 // is not quite the same thing as saying that everything has only one 13765 // use). 13766 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 13767 if (isa<ConstantSDNode>(Inputs[i])) 13768 continue; 13769 13770 for (SDNode *User : Inputs[i].getNode()->uses()) { 13771 if (User != N && !Visited.count(User)) 13772 return SDValue(); 13773 13774 // If we're going to promote the non-output-value operand(s) or SELECT or 13775 // SELECT_CC, record them for truncation. 
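// (For a (select cond, a, b) inside the cluster, only a and b take the
// promoted type; if cond is itself one of the promoted values, it must be
// truncated back to its recorded type when the select is rebuilt below.)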
13776 if (User->getOpcode() == ISD::SELECT) { 13777 if (User->getOperand(0) == Inputs[i]) 13778 SelectTruncOp[0].insert(std::make_pair(User, 13779 User->getOperand(0).getValueType())); 13780 } else if (User->getOpcode() == ISD::SELECT_CC) { 13781 if (User->getOperand(0) == Inputs[i]) 13782 SelectTruncOp[0].insert(std::make_pair(User, 13783 User->getOperand(0).getValueType())); 13784 if (User->getOperand(1) == Inputs[i]) 13785 SelectTruncOp[1].insert(std::make_pair(User, 13786 User->getOperand(1).getValueType())); 13787 } 13788 } 13789 } 13790 13791 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) { 13792 for (SDNode *User : PromOps[i].getNode()->uses()) { 13793 if (User != N && !Visited.count(User)) 13794 return SDValue(); 13795 13796 // If we're going to promote the non-output-value operand(s) or SELECT or 13797 // SELECT_CC, record them for truncation. 13798 if (User->getOpcode() == ISD::SELECT) { 13799 if (User->getOperand(0) == PromOps[i]) 13800 SelectTruncOp[0].insert(std::make_pair(User, 13801 User->getOperand(0).getValueType())); 13802 } else if (User->getOpcode() == ISD::SELECT_CC) { 13803 if (User->getOperand(0) == PromOps[i]) 13804 SelectTruncOp[0].insert(std::make_pair(User, 13805 User->getOperand(0).getValueType())); 13806 if (User->getOperand(1) == PromOps[i]) 13807 SelectTruncOp[1].insert(std::make_pair(User, 13808 User->getOperand(1).getValueType())); 13809 } 13810 } 13811 } 13812 13813 unsigned PromBits = N->getOperand(0).getValueSizeInBits(); 13814 bool ReallyNeedsExt = false; 13815 if (N->getOpcode() != ISD::ANY_EXTEND) { 13816 // If not all of the inputs are already sign/zero extended, then 13817 // we'll still need to do that at the end. 13818 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 13819 if (isa<ConstantSDNode>(Inputs[i])) 13820 continue; 13821 13822 unsigned OpBits = 13823 Inputs[i].getOperand(0).getValueSizeInBits(); 13824 assert(PromBits < OpBits && "Truncation not to a smaller bit count?"); 13825 13826 if ((N->getOpcode() == ISD::ZERO_EXTEND && 13827 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0), 13828 APInt::getHighBitsSet(OpBits, 13829 OpBits-PromBits))) || 13830 (N->getOpcode() == ISD::SIGN_EXTEND && 13831 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) < 13832 (OpBits-(PromBits-1)))) { 13833 ReallyNeedsExt = true; 13834 break; 13835 } 13836 } 13837 } 13838 13839 // Replace all inputs, either with the truncation operand, or a 13840 // truncation or extension to the final output type. 13841 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) { 13842 // Constant inputs need to be replaced with the to-be-promoted nodes that 13843 // use them because they might have users outside of the cluster of 13844 // promoted nodes.
13845 if (isa<ConstantSDNode>(Inputs[i])) 13846 continue; 13847 13848 SDValue InSrc = Inputs[i].getOperand(0); 13849 if (Inputs[i].getValueType() == N->getValueType(0)) 13850 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc); 13851 else if (N->getOpcode() == ISD::SIGN_EXTEND) 13852 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 13853 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0))); 13854 else if (N->getOpcode() == ISD::ZERO_EXTEND) 13855 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 13856 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0))); 13857 else 13858 DAG.ReplaceAllUsesOfValueWith(Inputs[i], 13859 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0))); 13860 } 13861 13862 std::list<HandleSDNode> PromOpHandles; 13863 for (auto &PromOp : PromOps) 13864 PromOpHandles.emplace_back(PromOp); 13865 13866 // Replace all operations (these are all the same, but have a different 13867 // (promoted) return type). DAG.getNode will validate that the types of 13868 // a binary operator match, so go through the list in reverse so that 13869 // we've likely promoted both operands first. 13870 while (!PromOpHandles.empty()) { 13871 SDValue PromOp = PromOpHandles.back().getValue(); 13872 PromOpHandles.pop_back(); 13873 13874 unsigned C; 13875 switch (PromOp.getOpcode()) { 13876 default: C = 0; break; 13877 case ISD::SELECT: C = 1; break; 13878 case ISD::SELECT_CC: C = 2; break; 13879 } 13880 13881 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) && 13882 PromOp.getOperand(C).getValueType() != N->getValueType(0)) || 13883 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) && 13884 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) { 13885 // The to-be-promoted operands of this node have not yet been 13886 // promoted (this should be rare because we're going through the 13887 // list backward, but if one of the operands has several users in 13888 // this cluster of to-be-promoted nodes, it is possible). 13889 PromOpHandles.emplace_front(PromOp); 13890 continue; 13891 } 13892 13893 // For SELECT and SELECT_CC nodes, we do a similar check for any 13894 // to-be-promoted comparison inputs. 13895 if (PromOp.getOpcode() == ISD::SELECT || 13896 PromOp.getOpcode() == ISD::SELECT_CC) { 13897 if ((SelectTruncOp[0].count(PromOp.getNode()) && 13898 PromOp.getOperand(0).getValueType() != N->getValueType(0)) || 13899 (SelectTruncOp[1].count(PromOp.getNode()) && 13900 PromOp.getOperand(1).getValueType() != N->getValueType(0))) { 13901 PromOpHandles.emplace_front(PromOp); 13902 continue; 13903 } 13904 } 13905 13906 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(), 13907 PromOp.getNode()->op_end()); 13908 13909 // If this node has constant inputs, then they'll need to be promoted here. 13910 for (unsigned i = 0; i < 2; ++i) { 13911 if (!isa<ConstantSDNode>(Ops[C+i])) 13912 continue; 13913 if (Ops[C+i].getValueType() == N->getValueType(0)) 13914 continue; 13915 13916 if (N->getOpcode() == ISD::SIGN_EXTEND) 13917 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 13918 else if (N->getOpcode() == ISD::ZERO_EXTEND) 13919 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 13920 else 13921 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0)); 13922 } 13923 13924 // If we've promoted the comparison inputs of a SELECT or SELECT_CC, 13925 // truncate them again to the original value type. 
13926 if (PromOp.getOpcode() == ISD::SELECT || 13927 PromOp.getOpcode() == ISD::SELECT_CC) { 13928 auto SI0 = SelectTruncOp[0].find(PromOp.getNode()); 13929 if (SI0 != SelectTruncOp[0].end()) 13930 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]); 13931 auto SI1 = SelectTruncOp[1].find(PromOp.getNode()); 13932 if (SI1 != SelectTruncOp[1].end()) 13933 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]); 13934 } 13935 13936 DAG.ReplaceAllUsesOfValueWith(PromOp, 13937 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops)); 13938 } 13939 13940 // Now we're left with the initial extension itself. 13941 if (!ReallyNeedsExt) 13942 return N->getOperand(0); 13943 13944 // To zero extend, just mask off everything except for the first bit (in the 13945 // i1 case). 13946 if (N->getOpcode() == ISD::ZERO_EXTEND) 13947 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0), 13948 DAG.getConstant(APInt::getLowBitsSet( 13949 N->getValueSizeInBits(0), PromBits), 13950 dl, N->getValueType(0))); 13951 13952 assert(N->getOpcode() == ISD::SIGN_EXTEND && 13953 "Invalid extension type"); 13954 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout()); 13955 SDValue ShiftCst = 13956 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy); 13957 return DAG.getNode( 13958 ISD::SRA, dl, N->getValueType(0), 13959 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst), 13960 ShiftCst); 13961 } 13962 13963 SDValue PPCTargetLowering::combineSetCC(SDNode *N, 13964 DAGCombinerInfo &DCI) const { 13965 assert(N->getOpcode() == ISD::SETCC && 13966 "Should be called with a SETCC node"); 13967 13968 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 13969 if (CC == ISD::SETNE || CC == ISD::SETEQ) { 13970 SDValue LHS = N->getOperand(0); 13971 SDValue RHS = N->getOperand(1); 13972 13973 // If there is a '0 - y' pattern, canonicalize the pattern to the RHS. 13974 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) && 13975 LHS.hasOneUse()) 13976 std::swap(LHS, RHS); 13977 13978 // x == 0-y --> x+y == 0 13979 // x != 0-y --> x+y != 0 13980 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) && 13981 RHS.hasOneUse()) { 13982 SDLoc DL(N); 13983 SelectionDAG &DAG = DCI.DAG; 13984 EVT VT = N->getValueType(0); 13985 EVT OpVT = LHS.getValueType(); 13986 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1)); 13987 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC); 13988 } 13989 } 13990 13991 return DAGCombineTruncBoolExt(N, DCI); 13992 } 13993 13994 // Is this an extending load from an f32 to an f64? 13995 static bool isFPExtLoad(SDValue Op) { 13996 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode())) 13997 return LD->getExtensionType() == ISD::EXTLOAD && 13998 Op.getValueType() == MVT::f64; 13999 return false; 14000 } 14001 14002 /// Reduces the number of fp-to-int conversions when building a vector. 14003 /// 14004 /// If this vector is built out of floating-point to integer conversions, 14005 /// transform it to a vector built out of floating-point values followed by a 14006 /// single floating-point to integer conversion of the vector. 14007 /// Namely (build_vector (fptosi $A), (fptosi $B), ...)
14008 /// becomes (fptosi (build_vector ($A, $B, ...))) 14009 SDValue PPCTargetLowering:: 14010 combineElementTruncationToVectorTruncation(SDNode *N, 14011 DAGCombinerInfo &DCI) const { 14012 assert(N->getOpcode() == ISD::BUILD_VECTOR && 14013 "Should be called with a BUILD_VECTOR node"); 14014 14015 SelectionDAG &DAG = DCI.DAG; 14016 SDLoc dl(N); 14017 14018 SDValue FirstInput = N->getOperand(0); 14019 assert(FirstInput.getOpcode() == PPCISD::MFVSR && 14020 "The input operand must be an fp-to-int conversion."); 14021 14022 // This combine happens after legalization so the fp_to_[su]i nodes are 14023 // already converted to PPCISD nodes. 14024 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode(); 14025 if (FirstConversion == PPCISD::FCTIDZ || 14026 FirstConversion == PPCISD::FCTIDUZ || 14027 FirstConversion == PPCISD::FCTIWZ || 14028 FirstConversion == PPCISD::FCTIWUZ) { 14029 bool IsSplat = true; 14030 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ || 14031 FirstConversion == PPCISD::FCTIWUZ; 14032 EVT SrcVT = FirstInput.getOperand(0).getValueType(); 14033 SmallVector<SDValue, 4> Ops; 14034 EVT TargetVT = N->getValueType(0); 14035 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 14036 SDValue NextOp = N->getOperand(i); 14037 if (NextOp.getOpcode() != PPCISD::MFVSR) 14038 return SDValue(); 14039 unsigned NextConversion = NextOp.getOperand(0).getOpcode(); 14040 if (NextConversion != FirstConversion) 14041 return SDValue(); 14042 // If we are converting to 32-bit integers, we need to add an FP_ROUND. 14043 // This is not valid if the input was originally double precision. It is 14044 // also not profitable to do unless this is an extending load, in which 14045 // case doing this combine will allow us to combine consecutive loads. 14046 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0))) 14047 return SDValue(); 14048 if (N->getOperand(i) != FirstInput) 14049 IsSplat = false; 14050 } 14051 14052 // If this is a splat, we leave it as-is since there will be only a single 14053 // fp-to-int conversion followed by a splat of the integer. This is better 14054 // for 32-bit and smaller ints and neutral for 64-bit ints. 14055 if (IsSplat) 14056 return SDValue(); 14057 14058 // Now that we know we have the right type of node, get its operands. 14059 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 14060 SDValue In = N->getOperand(i).getOperand(0); 14061 if (Is32Bit) { 14062 // For 32-bit values, we need to add an FP_ROUND node (if we made it 14063 // here, we know that all inputs are extending loads so this is safe). 14064 if (In.isUndef()) 14065 Ops.push_back(DAG.getUNDEF(SrcVT)); 14066 else { 14067 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl, 14068 MVT::f32, In.getOperand(0), 14069 DAG.getIntPtrConstant(1, dl)); 14070 Ops.push_back(Trunc); 14071 } 14072 } else 14073 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0)); 14074 } 14075 14076 unsigned Opcode; 14077 if (FirstConversion == PPCISD::FCTIDZ || 14078 FirstConversion == PPCISD::FCTIWZ) 14079 Opcode = ISD::FP_TO_SINT; 14080 else 14081 Opcode = ISD::FP_TO_UINT; 14082 14083 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32; 14084 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops); 14085 return DAG.getNode(Opcode, dl, TargetVT, BV); 14086 } 14087 return SDValue(); 14088 } 14089 14090 /// Reduce the number of loads when building a vector. 14091 /// 14092 /// Building a vector out of multiple loads can be converted to a load 14093 /// of the vector type if the loads are consecutive.
If the loads are 14094 /// consecutive but in descending order, a shuffle is added at the end 14095 /// to reorder the vector. 14096 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 14097 assert(N->getOpcode() == ISD::BUILD_VECTOR && 14098 "Should be called with a BUILD_VECTOR node"); 14099 14100 SDLoc dl(N); 14101 14102 // Return early for non-byte-sized types, as they can't be consecutive. 14103 if (!N->getValueType(0).getVectorElementType().isByteSized()) 14104 return SDValue(); 14105 14106 bool InputsAreConsecutiveLoads = true; 14107 bool InputsAreReverseConsecutive = true; 14108 unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize(); 14109 SDValue FirstInput = N->getOperand(0); 14110 bool IsRoundOfExtLoad = false; 14111 14112 if (FirstInput.getOpcode() == ISD::FP_ROUND && 14113 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 14114 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 14115 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 14116 } 14117 // Not a build vector of (possibly fp_rounded) loads. 14118 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) || 14119 N->getNumOperands() == 1) 14120 return SDValue(); 14121 14122 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 14123 // If any inputs are fp_round(extload), they all must be. 14124 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 14125 return SDValue(); 14126 14127 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 14128 N->getOperand(i); 14129 if (NextInput.getOpcode() != ISD::LOAD) 14130 return SDValue(); 14131 14132 SDValue PreviousInput = 14133 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 14134 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 14135 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 14136 14137 // If any inputs are fp_round(extload), they all must be. 14138 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 14139 return SDValue(); 14140 14141 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 14142 InputsAreConsecutiveLoads = false; 14143 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 14144 InputsAreReverseConsecutive = false; 14145 14146 // Exit early if the loads are neither consecutive nor reverse consecutive. 14147 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive) 14148 return SDValue(); 14149 } 14150 14151 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) && 14152 "The loads cannot be both consecutive and reverse consecutive."); 14153 14154 SDValue FirstLoadOp = 14155 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput; 14156 SDValue LastLoadOp = 14157 IsRoundOfExtLoad ?
N->getOperand(N->getNumOperands()-1).getOperand(0) : 14158 N->getOperand(N->getNumOperands()-1); 14159 14160 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp); 14161 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp); 14162 if (InputsAreConsecutiveLoads) { 14163 assert(LD1 && "Input needs to be a LoadSDNode."); 14164 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(), 14165 LD1->getBasePtr(), LD1->getPointerInfo(), 14166 LD1->getAlign()); 14167 } 14168 if (InputsAreReverseConsecutive) { 14169 assert(LDL && "Input needs to be a LoadSDNode."); 14170 SDValue Load = 14171 DAG.getLoad(N->getValueType(0), dl, LDL->getChain(), LDL->getBasePtr(), 14172 LDL->getPointerInfo(), LDL->getAlign()); 14173 SmallVector<int, 16> Ops; 14174 for (int i = N->getNumOperands() - 1; i >= 0; i--) 14175 Ops.push_back(i); 14176 14177 return DAG.getVectorShuffle(N->getValueType(0), dl, Load, 14178 DAG.getUNDEF(N->getValueType(0)), Ops); 14179 } 14180 return SDValue(); 14181 } 14182 14183 // This function adds the required vector_shuffle needed to get 14184 // the elements of the vector extract in the correct position 14185 // as specified by the CorrectElems encoding. 14186 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG, 14187 SDValue Input, uint64_t Elems, 14188 uint64_t CorrectElems) { 14189 SDLoc dl(N); 14190 14191 unsigned NumElems = Input.getValueType().getVectorNumElements(); 14192 SmallVector<int, 16> ShuffleMask(NumElems, -1); 14193 14194 // Knowing the element indices being extracted from the original 14195 // vector and the order in which they're being inserted, just put 14196 // them at the element indices required for the instruction. 14197 for (unsigned i = 0; i < N->getNumOperands(); i++) { 14198 if (DAG.getDataLayout().isLittleEndian()) 14199 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF; 14200 else 14201 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4; 14202 CorrectElems = CorrectElems >> 8; 14203 Elems = Elems >> 8; 14204 } 14205 14206 SDValue Shuffle = 14207 DAG.getVectorShuffle(Input.getValueType(), dl, Input, 14208 DAG.getUNDEF(Input.getValueType()), ShuffleMask); 14209 14210 EVT VT = N->getValueType(0); 14211 SDValue Conv = DAG.getBitcast(VT, Shuffle); 14212 14213 EVT ExtVT = EVT::getVectorVT(*DAG.getContext(), 14214 Input.getValueType().getVectorElementType(), 14215 VT.getVectorNumElements()); 14216 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv, 14217 DAG.getValueType(ExtVT)); 14218 } 14219 14220 // Look for build vector patterns where input operands come from sign 14221 // extended vector_extract elements of specific indices. If the correct indices 14222 // aren't used, add a vector shuffle to fix up the indices and create a 14223 // SIGN_EXTEND_INREG node, which selects the vector sign extend instructions 14224 // during instruction selection. 14225 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) { 14226 // This array encodes the indices that the vector sign extend instructions 14227 // extract from when extending from one type to another for both BE and LE. 14228 // The right nibble of each byte corresponds to the LE indices, 14229 // and the left nibble of each byte corresponds to the BE indices.
14230 // For example: 0x3074B8FC byte->word 14231 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC 14232 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF 14233 // For example: 0x000070F8 byte->double word 14234 // For LE: the allowed indices are: 0x0,0x8 14235 // For BE: the allowed indices are: 0x7,0xF 14236 uint64_t TargetElems[] = { 14237 0x3074B8FC, // b->w 14238 0x000070F8, // b->d 14239 0x10325476, // h->w 14240 0x00003074, // h->d 14241 0x00001032, // w->d 14242 }; 14243 14244 uint64_t Elems = 0; 14245 int Index; 14246 SDValue Input; 14247 14248 auto isSExtOfVecExtract = [&](SDValue Op) -> bool { 14249 if (!Op) 14250 return false; 14251 if (Op.getOpcode() != ISD::SIGN_EXTEND && 14252 Op.getOpcode() != ISD::SIGN_EXTEND_INREG) 14253 return false; 14254 14255 // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value 14256 // of the right width. 14257 SDValue Extract = Op.getOperand(0); 14258 if (Extract.getOpcode() == ISD::ANY_EXTEND) 14259 Extract = Extract.getOperand(0); 14260 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 14261 return false; 14262 14263 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1)); 14264 if (!ExtOp) 14265 return false; 14266 14267 Index = ExtOp->getZExtValue(); 14268 if (Input && Input != Extract.getOperand(0)) 14269 return false; 14270 14271 if (!Input) 14272 Input = Extract.getOperand(0); 14273 14274 Elems = Elems << 8; 14275 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4; 14276 Elems |= Index; 14277 14278 return true; 14279 }; 14280 14281 // If the build vector operands aren't sign-extended vector extracts 14282 // of the same input vector, then return. 14283 for (unsigned i = 0; i < N->getNumOperands(); i++) { 14284 if (!isSExtOfVecExtract(N->getOperand(i))) { 14285 return SDValue(); 14286 } 14287 } 14288 14289 // If the vector extract indices are not correct, add the appropriate 14290 // vector_shuffle. 14291 int TgtElemArrayIdx; 14292 int InputSize = Input.getValueType().getScalarSizeInBits(); 14293 int OutputSize = N->getValueType(0).getScalarSizeInBits(); 14294 if (InputSize + OutputSize == 40) 14295 TgtElemArrayIdx = 0; 14296 else if (InputSize + OutputSize == 72) 14297 TgtElemArrayIdx = 1; 14298 else if (InputSize + OutputSize == 48) 14299 TgtElemArrayIdx = 2; 14300 else if (InputSize + OutputSize == 80) 14301 TgtElemArrayIdx = 3; 14302 else if (InputSize + OutputSize == 96) 14303 TgtElemArrayIdx = 4; 14304 else 14305 return SDValue(); 14306 14307 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx]; 14308 CorrectElems = DAG.getDataLayout().isLittleEndian() 14309 ? CorrectElems & 0x0F0F0F0F0F0F0F0F 14310 : CorrectElems & 0xF0F0F0F0F0F0F0F0; 14311 if (Elems != CorrectElems) { 14312 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems); 14313 } 14314 14315 // Regular lowering will catch cases where a shuffle is not needed. 14316 return SDValue(); 14317 } 14318 14319 // Look for the pattern of a load from a narrow width to i128, feeding 14320 // into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node 14321 // (LXVRZX). This node represents a zero-extending load that will be matched 14322 // to the Load VSX Vector Rightmost instructions. 14323 static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) { 14324 SDLoc DL(N); 14325 14326 // This combine is only eligible for a BUILD_VECTOR of v1i128.
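// For example (a sketch): a v1i128 build_vector whose single operand is an
// i128 zero-extending load of an i32 becomes a PPCISD::LXVRZX node carrying
// the load width (32 here), which is later matched to an ISA 3.1 rightmost
// load such as lxvrwx.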
14327 if (N->getValueType(0) != MVT::v1i128) 14328 return SDValue(); 14329 14330 SDValue Operand = N->getOperand(0); 14331 // Proceed with the transformation if the operand to the BUILD_VECTOR 14332 // is a load instruction. 14333 if (Operand.getOpcode() != ISD::LOAD) 14334 return SDValue(); 14335 14336 auto *LD = cast<LoadSDNode>(Operand); 14337 EVT MemoryType = LD->getMemoryVT(); 14338 14339 // This transformation is only valid if we are loading either a byte, 14340 // halfword, word, or doubleword. 14341 bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 || 14342 MemoryType == MVT::i32 || MemoryType == MVT::i64; 14343 14344 // Ensure that the load from the narrow width is being zero extended to i128. 14345 if (!ValidLDType || 14346 (LD->getExtensionType() != ISD::ZEXTLOAD && 14347 LD->getExtensionType() != ISD::EXTLOAD)) 14348 return SDValue(); 14349 14350 SDValue LoadOps[] = { 14351 LD->getChain(), LD->getBasePtr(), 14352 DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)}; 14353 14354 return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL, 14355 DAG.getVTList(MVT::v1i128, MVT::Other), 14356 LoadOps, MemoryType, LD->getMemOperand()); 14357 } 14358 14359 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N, 14360 DAGCombinerInfo &DCI) const { 14361 assert(N->getOpcode() == ISD::BUILD_VECTOR && 14362 "Should be called with a BUILD_VECTOR node"); 14363 14364 SelectionDAG &DAG = DCI.DAG; 14365 SDLoc dl(N); 14366 14367 if (!Subtarget.hasVSX()) 14368 return SDValue(); 14369 14370 // The target-independent DAG combiner will leave a build_vector of 14371 // float-to-int conversions intact. We can generate MUCH better code for 14372 // a float-to-int conversion of a vector of floats. 14373 SDValue FirstInput = N->getOperand(0); 14374 if (FirstInput.getOpcode() == PPCISD::MFVSR) { 14375 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI); 14376 if (Reduced) 14377 return Reduced; 14378 } 14379 14380 // If we're building a vector out of consecutive loads, just load that 14381 // vector type. 14382 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG); 14383 if (Reduced) 14384 return Reduced; 14385 14386 // If we're building a vector out of extended elements from another vector 14387 // we have P9 vector integer extend instructions. The code assumes legal 14388 // input types (i.e. it can't handle things like v4i16) so do not run before 14389 // legalization. 14390 if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) { 14391 Reduced = combineBVOfVecSExt(N, DAG); 14392 if (Reduced) 14393 return Reduced; 14394 } 14395 14396 // On Power10, the Load VSX Vector Rightmost instructions can be utilized 14397 // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR 14398 // is a load from <valid narrow width> to i128.
14399 if (Subtarget.isISA3_1()) { 14400 SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG); 14401 if (BVOfZLoad) 14402 return BVOfZLoad; 14403 } 14404 14405 if (N->getValueType(0) != MVT::v2f64) 14406 return SDValue(); 14407 14408 // Looking for: 14409 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1)) 14410 if (FirstInput.getOpcode() != ISD::SINT_TO_FP && 14411 FirstInput.getOpcode() != ISD::UINT_TO_FP) 14412 return SDValue(); 14413 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP && 14414 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP) 14415 return SDValue(); 14416 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode()) 14417 return SDValue(); 14418 14419 SDValue Ext1 = FirstInput.getOperand(0); 14420 SDValue Ext2 = N->getOperand(1).getOperand(0); 14421 if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 14422 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 14423 return SDValue(); 14424 14425 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1)); 14426 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1)); 14427 if (!Ext1Op || !Ext2Op) 14428 return SDValue(); 14429 if (Ext1.getOperand(0).getValueType() != MVT::v4i32 || 14430 Ext1.getOperand(0) != Ext2.getOperand(0)) 14431 return SDValue(); 14432 14433 int FirstElem = Ext1Op->getZExtValue(); 14434 int SecondElem = Ext2Op->getZExtValue(); 14435 int SubvecIdx; 14436 if (FirstElem == 0 && SecondElem == 1) 14437 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0; 14438 else if (FirstElem == 2 && SecondElem == 3) 14439 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1; 14440 else 14441 return SDValue(); 14442 14443 SDValue SrcVec = Ext1.getOperand(0); 14444 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ? 14445 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP; 14446 return DAG.getNode(NodeType, dl, MVT::v2f64, 14447 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl)); 14448 } 14449 14450 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, 14451 DAGCombinerInfo &DCI) const { 14452 assert((N->getOpcode() == ISD::SINT_TO_FP || 14453 N->getOpcode() == ISD::UINT_TO_FP) && 14454 "Need an int -> FP conversion node here"); 14455 14456 if (useSoftFloat() || !Subtarget.has64BitSupport()) 14457 return SDValue(); 14458 14459 SelectionDAG &DAG = DCI.DAG; 14460 SDLoc dl(N); 14461 SDValue Op(N, 0); 14462 14463 // Don't handle ppc_fp128 here or conversions that are out-of-range capable 14464 // from the hardware. 14465 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 14466 return SDValue(); 14467 if (!Op.getOperand(0).getValueType().isSimple()) 14468 return SDValue(); 14469 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) || 14470 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64)) 14471 return SDValue(); 14472 14473 SDValue FirstOperand(Op.getOperand(0)); 14474 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD && 14475 (FirstOperand.getValueType() == MVT::i8 || 14476 FirstOperand.getValueType() == MVT::i16); 14477 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) { 14478 bool Signed = N->getOpcode() == ISD::SINT_TO_FP; 14479 bool DstDouble = Op.getValueType() == MVT::f64; 14480 unsigned ConvOp = Signed ? 14481 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) : 14482 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS); 14483 SDValue WidthConst = 14484 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 
                                  1 : 2, dl, false);
    LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
    SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
    SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
                                         DAG.getVTList(MVT::f64, MVT::Other),
                                         Ops, MVT::i8, LDN->getMemOperand());

    // For signed conversion, we need to sign-extend the value in the VSR.
    if (Signed) {
      SDValue ExtOps[] = { Ld, WidthConst };
      SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
    } else
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
  }

  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value. Thus, we cannot handle i32 intermediate values here.
  if (Op.getOperand(0).getValueType() == MVT::i32)
    return SDValue();

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  // If we're converting from a float to an int and back to a float again,
  // then we don't need the store/load pair at all.
  if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
       Subtarget.hasFPCVT()) ||
      (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
    SDValue Src = Op.getOperand(0).getOperand(0);
    if (Src.getValueType() == MVT::f32) {
      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
      DCI.AddToWorklist(Src.getNode());
    } else if (Src.getValueType() != MVT::f64) {
      // Make sure that we don't pick up a ppc_fp128 source value.
      return SDValue();
    }

    unsigned FCTOp =
        Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ
                                                        : PPCISD::FCTIDUZ;

    SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
      FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                       DAG.getIntPtrConstant(0, dl));
      DCI.AddToWorklist(FP.getNode());
    }

    return FP;
  }

  return SDValue();
}

// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
// builtins) into loads with swaps.
SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  // Delay VSX load for LE combine until after LegalizeOps to prioritize other
  // load combines.
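  // On little endian, a vector load such as (v2f64 (load <addr>)) is expanded
  // here to roughly (illustrative):
  //   (v2f64 (XXSWAPD (LXVD2X <addr>)))
  // so the lanes end up in the expected order despite lxvd2x's big-endian
  // element numbering.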
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX load");
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    Chain = LD->getChain();
    Base = LD->getBasePtr();
    MMO = LD->getMemOperand();
    // If the MMO suggests this isn't a load of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Similarly to the store case below, Intrin->getBasePtr() doesn't get
    // us what we want. Get operand 2 instead.
    Base = Intrin->getOperand(2);
    MMO = Intrin->getMemOperand();
    break;
  }
  }

  MVT VecTy = N->getValueType(0).getSimpleVT();

  SDValue LoadOps[] = { Chain, Base };
  SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
                                         DAG.getVTList(MVT::v2f64, MVT::Other),
                                         LoadOps, MVT::v2f64, MMO);

  DCI.AddToWorklist(Load.getNode());
  Chain = Load.getValue(1);
  SDValue Swap = DAG.getNode(
      PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
  DCI.AddToWorklist(Swap.getNode());

  // Add a bitcast if the resulting load type doesn't match v2f64.
  if (VecTy != MVT::v2f64) {
    SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
    DCI.AddToWorklist(N.getNode());
    // Package {bitcast value, swap's chain} to match Load's shape.
    return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
                       N, Swap.getValue(1));
  }

  return Swap;
}

// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
// builtins) into stores with swaps.
SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  // Delay VSX store for LE combine until after LegalizeOps to prioritize other
  // store combines.
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  unsigned SrcOpnd;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX store");
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(N);
    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();
    SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Intrin->getBasePtr() oddly does not get what we want.
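    // (For these store intrinsics the operands are laid out as
    // (chain, intrinsic-id, value, ptr), so the pointer is operand 3 and the
    // stored value is operand 2, as set just below.)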
    Base = Intrin->getOperand(3);
    MMO = Intrin->getMemOperand();
    SrcOpnd = 2;
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();

  // All stores are done as v2f64 and a possible bitcast.
  if (VecTy != MVT::v2f64) {
    Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
    DCI.AddToWorklist(Src.getNode());
  }

  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
  DCI.AddToWorklist(Swap.getNode());
  Chain = Swap.getValue(1);
  SDValue StoreOps[] = { Chain, Swap, Base };
  SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
                                          DAG.getVTList(MVT::Other),
                                          StoreOps, VecTy, MMO);
  DCI.AddToWorklist(Store.getNode());
  return Store;
}

// Handle DAG combine for STORE (FP_TO_INT F).
SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  unsigned Opcode = N->getOperand(1).getOpcode();

  assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) &&
         "Not a FP_TO_INT Instruction!");

  SDValue Val = N->getOperand(1).getOperand(0);
  EVT Op1VT = N->getOperand(1).getValueType();
  EVT ResVT = Val.getValueType();

  if (!isTypeLegal(ResVT))
    return SDValue();

  // Only perform combine for conversion to i64/i32 or power9 i16/i8.
  bool ValidTypeForStoreFltAsInt =
      (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
       (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));

  if (ResVT == MVT::f128 && !Subtarget.hasP9Vector())
    return SDValue();

  if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
      cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
    return SDValue();

  // Extend f32 values to f64.
  if (ResVT.getScalarSizeInBits() == 32) {
    Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
    DCI.AddToWorklist(Val.getNode());
  }

  // Set signed or unsigned conversion opcode.
  unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT)
                            ? PPCISD::FP_TO_SINT_IN_VSR
                            : PPCISD::FP_TO_UINT_IN_VSR;

  Val = DAG.getNode(ConvOpcode, dl,
                    ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
  DCI.AddToWorklist(Val.getNode());

  // Set the number of bytes being converted.
  unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
  SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
                    DAG.getIntPtrConstant(ByteSize, dl, false),
                    DAG.getValueType(Op1VT) };

  Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
                                DAG.getVTList(MVT::Other), Ops,
                                cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());

  DCI.AddToWorklist(Val.getNode());
  return Val;
}

static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
  // Check that the source of the element keeps flipping
  // (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
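  // For example, with NumElts = 4, the mask <0,5,2,7> alternates between the
  // two source vectors, while <0,1,6,7> does not.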
  bool PrevElemFromFirstVec = Mask[0] < NumElts;
  for (int i = 1, e = Mask.size(); i < e; i++) {
    if (PrevElemFromFirstVec && Mask[i] < NumElts)
      return false;
    if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
      return false;
    PrevElemFromFirstVec = !PrevElemFromFirstVec;
  }
  return true;
}

static bool isSplatBV(SDValue Op) {
  if (Op.getOpcode() != ISD::BUILD_VECTOR)
    return false;
  SDValue FirstOp;

  // Find first non-undef input.
  for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
    FirstOp = Op.getOperand(i);
    if (!FirstOp.isUndef())
      break;
  }

  // All inputs are undef or the same as the first non-undef input.
  for (int i = 1, e = Op.getNumOperands(); i < e; i++)
    if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
      return false;
  return true;
}

static SDValue isScalarToVec(SDValue Op) {
  if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return Op;
  if (Op.getOpcode() != ISD::BITCAST)
    return SDValue();
  Op = Op.getOperand(0);
  if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return Op;
  return SDValue();
}

// Fix up the shuffle mask to account for the fact that the result of
// scalar_to_vector is not in lane zero. This just takes all values in
// the ranges specified by the min/max indices and adds the number of
// elements required to ensure each element comes from the respective
// position in the valid lane.
// On little endian, that's just the corresponding element in the other
// half of the vector. On big endian, it is in the same half but right
// justified rather than left justified in that half.
static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
                                            int LHSMaxIdx, int RHSMinIdx,
                                            int RHSMaxIdx, int HalfVec,
                                            unsigned ValidLaneWidth,
                                            const PPCSubtarget &Subtarget) {
  for (int i = 0, e = ShuffV.size(); i < e; i++) {
    int Idx = ShuffV[i];
    if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
      ShuffV[i] +=
          Subtarget.isLittleEndian() ? HalfVec : HalfVec - ValidLaneWidth;
  }
}

// Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
// the original is:
// (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
// In such a case, just change the shuffle mask to extract the element
// from the permuted index.
static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG,
                               const PPCSubtarget &Subtarget) {
  SDLoc dl(OrigSToV);
  EVT VT = OrigSToV.getValueType();
  assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
         "Expecting a SCALAR_TO_VECTOR here");
  SDValue Input = OrigSToV.getOperand(0);

  if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
    SDValue OrigVector = Input.getOperand(0);

    // Can't handle non-const element indices or different vector types
    // for the input to the extract and the output of the scalar_to_vector.
    if (Idx && VT == OrigVector.getValueType()) {
      unsigned NumElts = VT.getVectorNumElements();
      assert(
          NumElts > 1 &&
          "Cannot produce a permuted scalar_to_vector for one element vector");
      SmallVector<int, 16> NewMask(NumElts, -1);
      unsigned ResultInElt = NumElts / 2;
      ResultInElt -= Subtarget.isLittleEndian() ? 0 : 1;
      NewMask[ResultInElt] = Idx->getZExtValue();
      return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
    }
  }
  return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
                     OrigSToV.getOperand(0));
}

// On little endian subtargets, combine shuffles such as:
// vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
// into:
// vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
// because the latter can be matched to a single instruction merge.
// Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
// to put the value into element zero. Adjust the shuffle mask so that the
// vector can remain in permuted form (to prevent a swap prior to a shuffle).
// On big endian targets, this is still useful for SCALAR_TO_VECTOR
// nodes with elements smaller than doubleword because all the ways
// of getting scalar data into a vector register put the value in the
// rightmost element of the left half of the vector.
SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
                                                SelectionDAG &DAG) const {
  SDValue LHS = SVN->getOperand(0);
  SDValue RHS = SVN->getOperand(1);
  auto Mask = SVN->getMask();
  int NumElts = LHS.getValueType().getVectorNumElements();
  SDValue Res(SVN, 0);
  SDLoc dl(SVN);
  bool IsLittleEndian = Subtarget.isLittleEndian();

  // On big endian targets this is only useful for subtargets with direct
  // moves. On little endian targets it would be useful for all subtargets
  // with VSX. However adding special handling for LE subtargets without
  // direct moves would be wasted effort since the minimum arch for LE is
  // ISA 2.07 (Power8) which includes direct moves.
  if (!Subtarget.hasDirectMove())
    return Res;

  // If this is not a shuffle of a shuffle and the first element comes from
  // the second vector, canonicalize to the commuted form. This will make it
  // more likely to match one of the single instruction patterns.
  if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
      RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
    std::swap(LHS, RHS);
    Res = DAG.getCommutedVectorShuffle(*SVN);
    Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
  }

  // Adjust the shuffle mask if either input vector comes from a
  // SCALAR_TO_VECTOR and keep the respective input vector in permuted
  // form (to prevent the need for a swap).
  SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
  SDValue SToVLHS = isScalarToVec(LHS);
  SDValue SToVRHS = isScalarToVec(RHS);
  if (SToVLHS || SToVRHS) {
    // FIXME: If both LHS and RHS are SCALAR_TO_VECTOR, but are not the
    // same type and have differing element sizes, then do not perform
    // the following transformation. The current transformation for
    // SCALAR_TO_VECTOR assumes that both input vectors have the same
    // element size.
    // This will be updated in the future to account for
    // differing sizes of the LHS and RHS.
    if (SToVLHS && SToVRHS &&
        (SToVLHS.getValueType().getScalarSizeInBits() !=
         SToVRHS.getValueType().getScalarSizeInBits()))
      return Res;

    int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
                            : SToVRHS.getValueType().getVectorNumElements();
    int NumEltsOut = ShuffV.size();
    // The width of the "valid lane" (i.e. the lane that contains the value
    // that is vectorized) needs to be expressed in terms of the number of
    // elements of the shuffle. It is thereby the ratio of the values before
    // and after any bitcast.
    unsigned ValidLaneWidth =
        SToVLHS ? SToVLHS.getValueType().getScalarSizeInBits() /
                      LHS.getValueType().getScalarSizeInBits()
                : SToVRHS.getValueType().getScalarSizeInBits() /
                      RHS.getValueType().getScalarSizeInBits();

    // Initially assume that neither input is permuted. These will be adjusted
    // accordingly if either input is.
    int LHSMaxIdx = -1;
    int RHSMinIdx = -1;
    int RHSMaxIdx = -1;
    int HalfVec = LHS.getValueType().getVectorNumElements() / 2;

    // Get the permuted scalar to vector nodes for the source(s) that come from
    // ISD::SCALAR_TO_VECTOR.
    // On big endian systems, this only makes sense for element sizes smaller
    // than 64 bits since for 64-bit elements, all instructions already put
    // the value into element zero. Since scalar size of LHS and RHS may differ
    // after isScalarToVec, this should be checked using their own sizes.
    if (SToVLHS) {
      if (!IsLittleEndian && SToVLHS.getValueType().getScalarSizeInBits() >= 64)
        return Res;
      // Set up the values for the shuffle vector fixup.
      LHSMaxIdx = NumEltsOut / NumEltsIn;
      SToVLHS = getSToVPermuted(SToVLHS, DAG, Subtarget);
      if (SToVLHS.getValueType() != LHS.getValueType())
        SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
      LHS = SToVLHS;
    }
    if (SToVRHS) {
      if (!IsLittleEndian && SToVRHS.getValueType().getScalarSizeInBits() >= 64)
        return Res;
      RHSMinIdx = NumEltsOut;
      RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
      SToVRHS = getSToVPermuted(SToVRHS, DAG, Subtarget);
      if (SToVRHS.getValueType() != RHS.getValueType())
        SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
      RHS = SToVRHS;
    }

    // Fix up the shuffle mask to reflect where the desired element actually
    // is. The minimum and maximum indices that correspond to element zero for
    // both the LHS and RHS are computed and will control which shuffle mask
    // entries are to be changed. For example, if the RHS is permuted, any
    // shuffle mask entries in the range [RHSMinIdx,RHSMaxIdx) will be
    // adjusted.
    fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
                                    HalfVec, ValidLaneWidth, Subtarget);
    Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);

    // We may have simplified away the shuffle. We won't be able to do anything
    // further with it here.
    if (!isa<ShuffleVectorSDNode>(Res))
      return Res;
    Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
  }

  SDValue TheSplat = IsLittleEndian ?
                         RHS : LHS;
  // The common case after we commuted the shuffle is that the RHS is a splat
  // and we have elements coming in from the splat at indices that are not
  // conducive to using a merge.
  // Example:
  // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
  if (!isSplatBV(TheSplat))
    return Res;

  // We are looking for a mask such that all even elements are from
  // one vector and all odd elements from the other.
  if (!isAlternatingShuffMask(Mask, NumElts))
    return Res;

  // Adjust the mask so we are pulling in the same index from the splat
  // as the index from the interesting vector in consecutive elements.
  if (IsLittleEndian) {
    // Example (even elements from first vector):
    // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
    if (Mask[0] < NumElts)
      for (int i = 1, e = Mask.size(); i < e; i += 2) {
        if (ShuffV[i] < 0)
          continue;
        ShuffV[i] = (ShuffV[i - 1] + NumElts);
      }
    // Example (odd elements from first vector):
    // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
    else
      for (int i = 0, e = Mask.size(); i < e; i += 2) {
        if (ShuffV[i] < 0)
          continue;
        ShuffV[i] = (ShuffV[i + 1] + NumElts);
      }
  } else {
    // Example (even elements from first vector):
    // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> <zero>, t1
    if (Mask[0] < NumElts)
      for (int i = 0, e = Mask.size(); i < e; i += 2) {
        if (ShuffV[i] < 0)
          continue;
        ShuffV[i] = ShuffV[i + 1] - NumElts;
      }
    // Example (odd elements from first vector):
    // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> <zero>, t1
    else
      for (int i = 1, e = Mask.size(); i < e; i += 2) {
        if (ShuffV[i] < 0)
          continue;
        ShuffV[i] = ShuffV[i - 1] - NumElts;
      }
  }

  // If the RHS has undefs, we need to remove them since we may have created
  // a shuffle that adds those instead of the splat value.
  SDValue SplatVal =
      cast<BuildVectorSDNode>(TheSplat.getNode())->getSplatValue();
  TheSplat = DAG.getSplatBuildVector(TheSplat.getValueType(), dl, SplatVal);

  if (IsLittleEndian)
    RHS = TheSplat;
  else
    LHS = TheSplat;
  return DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
}

SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
                                                LSBaseSDNode *LSBase,
                                                DAGCombinerInfo &DCI) const {
  assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
         "Not a reverse memop pattern!");

  auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
    auto Mask = SVN->getMask();
    int i = 0;
    auto I = Mask.rbegin();
    auto E = Mask.rend();

    for (; I != E; ++I) {
      if (*I != i)
        return false;
      i++;
    }
    return true;
  };

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = SVN->getValueType(0);

  if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
    return SDValue();

  // Before Power9, we rely on the PPCVSXSwapRemoval pass to fix up the
  // element order; see the comment in PPCVSXSwapRemoval.cpp. This combine
  // conflicts with that optimization, so we do not perform it pre-Power9.
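  // For example, on a Power9 LE subtarget an element-reversing shuffle fed by
  // a normal load, roughly:
  //   (v4i32 (vector_shuffle<3,2,1,0> (load <addr>), undef))
  // can be replaced by a single big-endian-order load:
  //   (v4i32 (LOAD_VEC_BE <addr>))
  // with the analogous replacement done for stores below.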
  if (!Subtarget.hasP9Vector())
    return SDValue();

  if (!IsElementReverse(SVN))
    return SDValue();

  if (LSBase->getOpcode() == ISD::LOAD) {
    // If result 0 of the load has any user other than the shufflevector,
    // it is not profitable to replace the shufflevector with a reverse load.
    for (SDNode::use_iterator UI = LSBase->use_begin(), UE = LSBase->use_end();
         UI != UE; ++UI)
      if (UI.getUse().getResNo() == 0 && UI->getOpcode() != ISD::VECTOR_SHUFFLE)
        return SDValue();

    SDLoc dl(LSBase);
    SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  if (LSBase->getOpcode() == ISD::STORE) {
    // If there are other uses of the shuffle, the swap cannot be avoided.
    // Forcing the use of an X-Form (since swapped stores only have
    // X-Forms) without removing the swap is unprofitable.
    if (!SVN->hasOneUse())
      return SDValue();

    SDLoc dl(LSBase);
    SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
                          LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  llvm_unreachable("Expected a load or store node here");
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:
    return combineADD(N, DCI);
  case ISD::SHL:
    return combineSHL(N, DCI);
  case ISD::SRA:
    return combineSRA(N, DCI);
  case ISD::SRL:
    return combineSRL(N, DCI);
  case ISD::MUL:
    return combineMUL(N, DCI);
  case ISD::FMA:
  case PPCISD::FNMSUB:
    return combineFMALike(N, DCI);
  case PPCISD::SHL:
    if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRL:
    if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isZero() ||   // 0 >>s V -> 0.
          C->isAllOnes())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
    return combineTRUNCATE(N, DCI);
  case ISD::SETCC:
    if (SDValue CSCC = combineSetCC(N, DCI))
      return CSCC;
    LLVM_FALLTHROUGH;
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::VECTOR_SHUFFLE:
    if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
      LSBaseSDNode *LSBase = cast<LSBaseSDNode>(N->getOperand(0));
      return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
    }
    return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
  case ISD::STORE: {
    EVT Op1VT = N->getOperand(1).getValueType();
    unsigned Opcode = N->getOperand(1).getOpcode();

    if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
      if (Val)
        return Val;
    }

    if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
      ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
      if (Val)
        return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {

      // STBRX can only handle simple types and it makes no sense to store
      // fewer than two bytes in byte-reversed order.
      EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
      if (mVT.isExtended() || mVT.getSizeInBits() < 16)
        break;

      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted to the right side before STBRX.
      if (Op1VT.bitsGT(mVT)) {
        int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
        BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
                              DAG.getConstant(Shift, dl, MVT::i32));
        // Need to truncate if this is a bswap of i64 stored as i32/i16.
        if (Op1VT == MVT::i64)
          BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
      }

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
    // so that it can increase the chance of CSE constant construction.
    if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
        isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // Need to sign-extend to 64 bits to handle negative values.
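      // For example, "store i32 0" is rewritten as a truncating store of
      // "i64 0", so a single i64 zero materialization can be CSE'd with other
      // uses of the same constant (illustrative; the actual benefit depends
      // on what other constants are live).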
      EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
      uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
                                    MemVT.getSizeInBits());
      SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);

      // DAG.getTruncStore() can't be used here because it doesn't accept
      // the general (base + offset) addressing mode.
      // So we use UpdateNodeOperands and setTruncatingStore instead.
      DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
                             N->getOperand(3));
      cast<StoreSDNode>(N)->setTruncatingStore(true);
      return SDValue(N, 0);
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Op1VT.isSimple()) {
      MVT StoreVT = Op1VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    // We sometimes end up with a 64-bit integer load, from which we extract
    // two single-precision floating-point numbers. This happens with
    // std::complex<float>, and other similar structures, because of the way we
    // canonicalize structure copies. However, if we lack direct moves,
    // then the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs. Even with direct
    // moves, just loading the two floating-point numbers is likely better.
    auto ReplaceTwoFloatLoad = [&]() {
      if (VT != MVT::i64)
        return false;

      if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
          LD->isVolatile())
        return false;

      // We're looking for a sequence like this:
      // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
      //     t16: i64 = srl t13, Constant:i32<32>
      //   t17: i32 = truncate t16
      // t18: f32 = bitcast t17
      //   t19: i32 = truncate t13
      // t20: f32 = bitcast t19

      if (!LD->hasNUsesOfValue(2, 0))
        return false;

      auto UI = LD->use_begin();
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *Trunc = *UI++;
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *RightShift = *UI;
      if (Trunc->getOpcode() != ISD::TRUNCATE)
        std::swap(Trunc, RightShift);

      if (Trunc->getOpcode() != ISD::TRUNCATE ||
          Trunc->getValueType(0) != MVT::i32 ||
          !Trunc->hasOneUse())
        return false;
      if (RightShift->getOpcode() != ISD::SRL ||
          !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
          RightShift->getConstantOperandVal(1) != 32 ||
          !RightShift->hasOneUse())
        return false;

      SDNode *Trunc2 = *RightShift->use_begin();
      if (Trunc2->getOpcode() != ISD::TRUNCATE ||
          Trunc2->getValueType(0) != MVT::i32 ||
          !Trunc2->hasOneUse())
        return false;

      SDNode *Bitcast = *Trunc->use_begin();
      SDNode *Bitcast2 = *Trunc2->use_begin();

      if (Bitcast->getOpcode() != ISD::BITCAST ||
          Bitcast->getValueType(0) != MVT::f32)
        return false;
      if (Bitcast2->getOpcode() != ISD::BITCAST ||
          Bitcast2->getValueType(0) != MVT::f32)
        return false;

      if (Subtarget.isLittleEndian())
        std::swap(Bitcast, Bitcast2);

      // Bitcast has the second float (in memory-layout order) and Bitcast2
      // has the first one.

      SDValue BasePtr = LD->getBasePtr();
      if (LD->isIndexed()) {
        assert(LD->getAddressingMode() == ISD::PRE_INC &&
               "Non-pre-inc AM on PPC?");
        BasePtr =
            DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                        LD->getOffset());
      }

      auto MMOFlags =
          LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
      SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
                                      LD->getPointerInfo(), LD->getAlign(),
                                      MMOFlags, LD->getAAInfo());
      SDValue AddPtr =
          DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
                      BasePtr, DAG.getIntPtrConstant(4, dl));
      SDValue FloatLoad2 = DAG.getLoad(
          MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
          LD->getPointerInfo().getWithOffset(4),
          commonAlignment(LD->getAlign(), 4), MMOFlags, LD->getAAInfo());

      if (LD->isIndexed()) {
        // Note that DAGCombine should re-form any pre-increment load(s) from
        // what is produced here if that makes sense.
        DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
      }

      DCI.CombineTo(Bitcast2, FloatLoad);
      DCI.CombineTo(Bitcast, FloatLoad2);

      DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ?
                                                            2 : 1),
                                    SDValue(FloatLoad2.getNode(), 1));
      return true;
    };

    if (ReplaceTwoFloatLoad())
      return SDValue(N, 0);

    EVT MemVT = LD->getMemoryVT();
    Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
    Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
    if (LD->isUnindexed() && VT.isVector() &&
        ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
          // P8 and later hardware should just use LOAD.
          !Subtarget.hasP8Vector() &&
          (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
           VT == MVT::v4f32))) &&
        LD->getAlign() < ABIAlignment) {
      // This is a type-legal unaligned Altivec load.
      SDValue Chain = LD->getChain();
      SDValue Ptr = LD->getBasePtr();
      bool isLittleEndian = Subtarget.isLittleEndian();

      // This implements the loading of unaligned vectors as described in
      // the venerable Apple Velocity Engine overview. Specifically:
      // https://developer.apple.com/hardwaredrivers/ve/alignment.html
      // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
      //
      // The general idea is to expand a sequence of one or more unaligned
      // loads into an alignment-based permutation-control instruction (lvsl
      // or lvsr), a series of regular vector loads (which always truncate
      // their input address to an aligned address), and a series of
      // permutations. The results of these permutations are the requested
      // loaded values. The trick is that the last "extra" load is not taken
      // from the address you might suspect (sizeof(vector) bytes after the
      // last requested load), but rather sizeof(vector) - 1 bytes after the
      // last requested vector. The point of this is to avoid a page fault if
      // the base address happened to be aligned. This works because if the
      // base address is aligned, then adding less than a full vector length
      // will cause the last vector in the sequence to be (re)loaded.
      // Otherwise, the next vector will be fetched as you might suspect was
      // necessary.

      // We might be able to reuse the permutation generation from
      // a different base address offset from this one by an aligned amount.
      // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
      // optimization later.
      Intrinsic::ID Intr, IntrLD, IntrPerm;
      MVT PermCntlTy, PermTy, LDTy;
      Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                            : Intrinsic::ppc_altivec_lvsl;
      IntrLD = Intrinsic::ppc_altivec_lvx;
      IntrPerm = Intrinsic::ppc_altivec_vperm;
      PermCntlTy = MVT::v16i8;
      PermTy = MVT::v4i32;
      LDTy = MVT::v4i32;

      SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);

      // Create the new MMO for the new base load. It is like the original MMO,
      // but represents an area in memory almost twice the vector size centered
      // on the original address. If the address is unaligned, we might start
      // reading up to (sizeof(vector)-1) bytes below the address of the
      // original unaligned load.
      MachineFunction &MF = DAG.getMachineFunction();
      MachineMemOperand *BaseMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                -(long)MemVT.getStoreSize()+1,
                                2*MemVT.getStoreSize()-1);
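      // The code below builds roughly this sequence (illustrative; on little
      // endian, lvsr is used instead and the two loaded vectors are swapped
      // as vperm inputs):
      //   %cntl = lvsl(ptr)
      //   %v0   = lvx(ptr)
      //   %v1   = lvx(ptr + IncValue)
      //   %res  = vperm(%v0, %v1, %cntl)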
      // Create the new base load.
      SDValue LDXIntID =
          DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue BaseLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                BaseLoadOps, LDTy, BaseMMO);

      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment), and the value of IncValue (which is actually used to
      // increment the pointer value) are different! This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
      int IncOffset = VT.getSizeInBits() / 8;
      int IncValue = IncOffset;

      // Walk (both up and down) the chain looking for another load at the real
      // (aligned) offset (the alignment of the other load does not matter in
      // this case). If found, then do not use the offset reduction trick, as
      // that will prevent the loads from being later combined (as they would
      // otherwise be duplicates).
      if (!findConsecutiveLoad(LD, DAG))
        --IncValue;

      SDValue Increment =
          DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);

      MachineMemOperand *ExtraMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                1, 2*MemVT.getStoreSize()-1);
      SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue ExtraLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                ExtraLoadOps, LDTy, ExtraMMO);

      SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                               BaseLoad.getValue(1), ExtraLoad.getValue(1));

      // Because vperm has a big-endian bias, we must reverse the order
      // of the input vectors and complement the permute control vector
      // when generating little endian code. We have already handled the
      // latter by using lvsr instead of lvsl, so just reverse BaseLoad
      // and ExtraLoad here.
      SDValue Perm;
      if (isLittleEndian)
        Perm = BuildIntrinsicOp(IntrPerm,
                                ExtraLoad, BaseLoad, PermCntl, DAG, dl);
      else
        Perm = BuildIntrinsicOp(IntrPerm,
                                BaseLoad, ExtraLoad, PermCntl, DAG, dl);

      if (VT != PermTy)
        Perm = Subtarget.hasAltivec()
                   ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
                   : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
                                 DAG.getTargetConstant(1, dl, MVT::i64));
                     // The second argument is 1 because this rounding
                     // is always exact.

      // The output of the permutation is our loaded result, the TokenFactor is
      // our new chain.
      DCI.CombineTo(N, Perm, TF);
      return SDValue(N, 0);
    }
  }
  break;
  case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ?
                              Intrinsic::ppc_altivec_lvsr
                            : Intrinsic::ppc_altivec_lvsl);
    if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      int Bits = 4 /* 16 byte alignment */;

      if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                APInt::getAllOnes(Bits /* alignment */)
                                    .zext(Add.getScalarValueSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode *U : BasePtr->uses()) {
          if (U->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(U->getOperand(0))->getZExtValue() == IID) {
            // We've found another LVSL/LVSR, and this address is an aligned
            // multiple of that one. The results will be the same, so use the
            // one we've just found instead.
            return SDValue(U, 0);
          }
        }
      }

      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode *U : BasePtr->uses()) {
          if (U->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(U->getOperand(1)) &&
              (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
               cast<ConstantSDNode>(U->getOperand(1))->getZExtValue()) %
                      (1ULL << Bits) ==
                  0) {
            SDNode *OtherAdd = U;
            for (SDNode *V : OtherAdd->uses()) {
              if (V->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  cast<ConstantSDNode>(V->getOperand(0))->getZExtValue() ==
                      IID) {
                return SDValue(V, 0);
              }
            }
          }
        }
      }
    }

    // Combine vmaxsw/h/b(a, a's negation) to abs(a)
    // Expose the vabsduw/h/b opportunity for downstream passes.
    if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
        (IID == Intrinsic::ppc_altivec_vmaxsw ||
         IID == Intrinsic::ppc_altivec_vmaxsh ||
         IID == Intrinsic::ppc_altivec_vmaxsb)) {
      SDValue V1 = N->getOperand(1);
      SDValue V2 = N->getOperand(2);
      if ((V1.getSimpleValueType() == MVT::v4i32 ||
           V1.getSimpleValueType() == MVT::v8i16 ||
           V1.getSimpleValueType() == MVT::v16i8) &&
          V1.getSimpleValueType() == V2.getSimpleValueType()) {
        // (0-a, a)
        if (V1.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
            V1.getOperand(1) == V2) {
          return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
        }
        // (a, 0-a)
        if (V2.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
            V2.getOperand(1) == V1) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
        // (x-y, y-x)
        if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
            V1.getOperand(0) == V2.getOperand(1) &&
            V1.getOperand(1) == V2.getOperand(0)) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
      }
    }
  }

    break;
  case ISD::INTRINSIC_W_CHAIN:
    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_lxvw4x:
      case Intrinsic::ppc_vsx_lxvd2x:
        return expandVSXLoadForLE(N, DCI);
      }
    }
    break;
  case ISD::INTRINSIC_VOID:
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_stxvw4x:
      case Intrinsic::ppc_vsx_stxvd2x:
        return expandVSXStoreForLE(N, DCI);
      }
    }
    break;
  case ISD::BSWAP: {
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    // For subtargets without LDBRX, we can still do better than the default
    // expansion even for 64-bit BSWAP (LOAD).
    bool Is64BitBswapOn64BitTgt =
        Subtarget.isPPC64() && N->getValueType(0) == MVT::i64;
    bool IsSingleUseNormalLd = ISD::isNormalLoad(N->getOperand(0).getNode()) &&
                               N->getOperand(0).hasOneUse();
    if (IsSingleUseNormalLd &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Is64BitBswapOn64BitTgt))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),                       // Chain
        LD->getBasePtr(),                     // Ptr
        DAG.getValueType(N->getValueType(0))  // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away. This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away, we give it a bogus result value but a
      // real chain result. The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
    // Convert this to two 32-bit bswap loads and a BUILD_PAIR. Do this only
    // before legalization so that the BUILD_PAIR is handled correctly.
    if (!DCI.isBeforeLegalize() || !Is64BitBswapOn64BitTgt ||
        !IsSingleUseNormalLd)
      return SDValue();
    LoadSDNode *LD = cast<LoadSDNode>(N->getOperand(0));

    // Can't split volatile or atomic loads.
    if (!LD->isSimple())
      return SDValue();
    SDValue BasePtr = LD->getBasePtr();
    SDValue Lo = DAG.getLoad(MVT::i32, dl, LD->getChain(), BasePtr,
                             LD->getPointerInfo(), LD->getAlign());
    Lo = DAG.getNode(ISD::BSWAP, dl, MVT::i32, Lo);
    BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                          DAG.getIntPtrConstant(4, dl));
    MachineMemOperand *NewMMO = DAG.getMachineFunction().getMachineMemOperand(
        LD->getMemOperand(), 4, 4);
    SDValue Hi = DAG.getLoad(MVT::i32, dl, LD->getChain(), BasePtr, NewMMO);
    Hi = DAG.getNode(ISD::BSWAP, dl, MVT::i32, Hi);
    SDValue Res;
    if (Subtarget.isLittleEndian())
      Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Hi, Lo);
    else
      Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    SDValue TF =
        DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                    Hi.getOperand(0).getValue(1), Lo.getOperand(0).getValue(1));
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), TF);
    return Res;
  }
  case PPCISD::VCMP:
    // If a VCMP_rec node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMP_rec computes both a CR6
    // and a normal output).
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMP_rec's that match.
      SDNode *VCMPrecNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMP_rec &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPrecNode = *UI;
          break;
        }

      // If there is no VCMP_rec node, or if its flag result is unused,
      // don't transform this.
      if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value. If it has a
      // chain, this transformation is more complex. Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = nullptr;
      for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      // Otherwise we give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPrecNode, 0);
    }
    break;
  case ISD::BRCOND: {
    SDValue Cond = N->getOperand(1);
    SDValue Target = N->getOperand(2);

    if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
            Intrinsic::loop_decrement) {

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
      assert(Cond.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
                         N->getOperand(0), Target);
    }
  }
  break;
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6. This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
    // value. If so, look through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
            Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !isNullConstant(LHS.getOperand(1)))
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
            Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ) // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
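      // (The first argument of the *_p predicate intrinsic, i.e. operand 1
      // of the node, selects which CR6 bit to test and in which sense; the
      // switch below maps it to a PPC branch predicate.)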
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  case ISD::ABS:
    return combineABS(N, DCI);
  case ISD::VSELECT:
    return combineVSelect(N, DCI);
  }

  return SDValue();
}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = Divisor.isNegatedPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  Created.push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    Created.push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpequq_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtsq_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
    case Intrinsic::ppc_altivec_vcmpgtuq_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
    default:
      break;
    case Intrinsic::ppc_load2r:
      // Top bits are cleared for load2r (which is the same as lhbrx).
      Known.Zero = 0xFFFF0000;
      break;
    }
    break;
  }
  }
}

Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR10:
  case PPC::DIR_PWR_FUTURE: {
    if (!ML)
      break;

    if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment
      // so that we can decrease cache misses and branch-prediction misses.
      // Actual alignment of the loop will depend on the hotness check and
      // other logic in alignBlocks.
      if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
        return Align(32);
    }

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    if (LoopSize > 16 && LoopSize <= 32)
      return Align(32);

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // Individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi" || Constraint == "ww") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match, but allow it at the
  // lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // An individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // Registers that just hold 64-bit integer data.
16041 else if (StringRef(constraint) == "ws" && type->isDoubleTy()) 16042 return CW_Register; 16043 else if (StringRef(constraint) == "ww" && type->isFloatTy()) 16044 return CW_Register; 16045 16046 switch (*constraint) { 16047 default: 16048 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 16049 break; 16050 case 'b': 16051 if (type->isIntegerTy()) 16052 weight = CW_Register; 16053 break; 16054 case 'f': 16055 if (type->isFloatTy()) 16056 weight = CW_Register; 16057 break; 16058 case 'd': 16059 if (type->isDoubleTy()) 16060 weight = CW_Register; 16061 break; 16062 case 'v': 16063 if (type->isVectorTy()) 16064 weight = CW_Register; 16065 break; 16066 case 'y': 16067 weight = CW_Register; 16068 break; 16069 case 'Z': 16070 weight = CW_Memory; 16071 break; 16072 } 16073 return weight; 16074 } 16075 16076 std::pair<unsigned, const TargetRegisterClass *> 16077 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 16078 StringRef Constraint, 16079 MVT VT) const { 16080 if (Constraint.size() == 1) { 16081 // GCC RS6000 Constraint Letters 16082 switch (Constraint[0]) { 16083 case 'b': // R1-R31 16084 if (VT == MVT::i64 && Subtarget.isPPC64()) 16085 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass); 16086 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass); 16087 case 'r': // R0-R31 16088 if (VT == MVT::i64 && Subtarget.isPPC64()) 16089 return std::make_pair(0U, &PPC::G8RCRegClass); 16090 return std::make_pair(0U, &PPC::GPRCRegClass); 16091 // 'd' and 'f' constraints are both defined to be "the floating point 16092 // registers", where one is for 32-bit and the other for 64-bit. We don't 16093 // really care overly much here so just give them all the same reg classes. 16094 case 'd': 16095 case 'f': 16096 if (Subtarget.hasSPE()) { 16097 if (VT == MVT::f32 || VT == MVT::i32) 16098 return std::make_pair(0U, &PPC::GPRCRegClass); 16099 if (VT == MVT::f64 || VT == MVT::i64) 16100 return std::make_pair(0U, &PPC::SPERCRegClass); 16101 } else { 16102 if (VT == MVT::f32 || VT == MVT::i32) 16103 return std::make_pair(0U, &PPC::F4RCRegClass); 16104 if (VT == MVT::f64 || VT == MVT::i64) 16105 return std::make_pair(0U, &PPC::F8RCRegClass); 16106 } 16107 break; 16108 case 'v': 16109 if (Subtarget.hasAltivec() && VT.isVector()) 16110 return std::make_pair(0U, &PPC::VRRCRegClass); 16111 else if (Subtarget.hasVSX()) 16112 // Scalars in Altivec registers only make sense with VSX. 16113 return std::make_pair(0U, &PPC::VFRCRegClass); 16114 break; 16115 case 'y': // crrc 16116 return std::make_pair(0U, &PPC::CRRCRegClass); 16117 } 16118 } else if (Constraint == "wc" && Subtarget.useCRBits()) { 16119 // An individual CR bit. 16120 return std::make_pair(0U, &PPC::CRBITRCRegClass); 16121 } else if ((Constraint == "wa" || Constraint == "wd" || 16122 Constraint == "wf" || Constraint == "wi") && 16123 Subtarget.hasVSX()) { 16124 // A VSX register for either a scalar (FP) or vector. There is no 16125 // support for single precision scalars on subtargets prior to Power8. 
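    // Illustratively, per the checks below: "wa" with a vector type picks
    // VSRC, "wa" with f32 picks VSSRC on Power8 and later, and "wa" with
    // f64 picks VSFRC.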
16126 if (VT.isVector()) 16127 return std::make_pair(0U, &PPC::VSRCRegClass); 16128 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 16129 return std::make_pair(0U, &PPC::VSSRCRegClass); 16130 return std::make_pair(0U, &PPC::VSFRCRegClass); 16131 } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) { 16132 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 16133 return std::make_pair(0U, &PPC::VSSRCRegClass); 16134 else 16135 return std::make_pair(0U, &PPC::VSFRCRegClass); 16136 } else if (Constraint == "lr") { 16137 if (VT == MVT::i64) 16138 return std::make_pair(0U, &PPC::LR8RCRegClass); 16139 else 16140 return std::make_pair(0U, &PPC::LRRCRegClass); 16141 } 16142 16143 // Handle special cases of physical registers that are not properly handled 16144 // by the base class. 16145 if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') { 16146 // If we name a VSX register, we can't defer to the base class because it 16147 // will not recognize the correct register (their names will be VSL{0-31} 16148 // and V{0-31} so they won't match). So we match them here. 16149 if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') { 16150 int VSNum = atoi(Constraint.data() + 3); 16151 assert(VSNum >= 0 && VSNum <= 63 && 16152 "Attempted to access a vsr out of range"); 16153 if (VSNum < 32) 16154 return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass); 16155 return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass); 16156 } 16157 16158 // For float registers, we can't defer to the base class as it will match 16159 // the SPILLTOVSRRC class. 16160 if (Constraint.size() > 3 && Constraint[1] == 'f') { 16161 int RegNum = atoi(Constraint.data() + 2); 16162 if (RegNum > 31 || RegNum < 0) 16163 report_fatal_error("Invalid floating point register number"); 16164 if (VT == MVT::f32 || VT == MVT::i32) 16165 return Subtarget.hasSPE() 16166 ? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass) 16167 : std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass); 16168 if (VT == MVT::f64 || VT == MVT::i64) 16169 return Subtarget.hasSPE() 16170 ? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass) 16171 : std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass); 16172 } 16173 } 16174 16175 std::pair<unsigned, const TargetRegisterClass *> R = 16176 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 16177 16178 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 16179 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 16180 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 16181 // register. 16182 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 16183 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 16184 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 16185 PPC::GPRCRegClass.contains(R.first)) 16186 return std::make_pair(TRI->getMatchingSuperReg(R.first, 16187 PPC::sub_32, &PPC::G8RCRegClass), 16188 &PPC::G8RCRegClass); 16189 16190 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 16191 if (!R.second && StringRef("{cc}").equals_insensitive(Constraint)) { 16192 R.first = PPC::CR0; 16193 R.second = &PPC::CRRCRegClass; 16194 } 16195 // FIXME: This warning should ideally be emitted in the front end. 
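  // Note that this is only a diagnostic: the requested register is still
  // returned, since V20-V31 (and the VF20-VF31 aliases) are merely reserved
  // under the default AIX AltiVec ABI, not unencodable.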
16196 const auto &TM = getTargetMachine(); 16197 if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) { 16198 if (((R.first >= PPC::V20 && R.first <= PPC::V31) || 16199 (R.first >= PPC::VF20 && R.first <= PPC::VF31)) && 16200 (R.second == &PPC::VSRCRegClass || R.second == &PPC::VSFRCRegClass)) 16201 errs() << "warning: vector registers 20 to 32 are reserved in the " 16202 "default AIX AltiVec ABI and cannot be used\n"; 16203 } 16204 16205 return R; 16206 } 16207 16208 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 16209 /// vector. If it is invalid, don't add anything to Ops. 16210 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 16211 std::string &Constraint, 16212 std::vector<SDValue>&Ops, 16213 SelectionDAG &DAG) const { 16214 SDValue Result; 16215 16216 // Only support length 1 constraints. 16217 if (Constraint.length() > 1) return; 16218 16219 char Letter = Constraint[0]; 16220 switch (Letter) { 16221 default: break; 16222 case 'I': 16223 case 'J': 16224 case 'K': 16225 case 'L': 16226 case 'M': 16227 case 'N': 16228 case 'O': 16229 case 'P': { 16230 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 16231 if (!CST) return; // Must be an immediate to match. 16232 SDLoc dl(Op); 16233 int64_t Value = CST->getSExtValue(); 16234 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative 16235 // numbers are printed as such. 16236 switch (Letter) { 16237 default: llvm_unreachable("Unknown constraint letter!"); 16238 case 'I': // "I" is a signed 16-bit constant. 16239 if (isInt<16>(Value)) 16240 Result = DAG.getTargetConstant(Value, dl, TCVT); 16241 break; 16242 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 16243 if (isShiftedUInt<16, 16>(Value)) 16244 Result = DAG.getTargetConstant(Value, dl, TCVT); 16245 break; 16246 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 16247 if (isShiftedInt<16, 16>(Value)) 16248 Result = DAG.getTargetConstant(Value, dl, TCVT); 16249 break; 16250 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 16251 if (isUInt<16>(Value)) 16252 Result = DAG.getTargetConstant(Value, dl, TCVT); 16253 break; 16254 case 'M': // "M" is a constant that is greater than 31. 16255 if (Value > 31) 16256 Result = DAG.getTargetConstant(Value, dl, TCVT); 16257 break; 16258 case 'N': // "N" is a positive constant that is an exact power of two. 16259 if (Value > 0 && isPowerOf2_64(Value)) 16260 Result = DAG.getTargetConstant(Value, dl, TCVT); 16261 break; 16262 case 'O': // "O" is the constant zero. 16263 if (Value == 0) 16264 Result = DAG.getTargetConstant(Value, dl, TCVT); 16265 break; 16266 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 16267 if (isInt<16>(-Value)) 16268 Result = DAG.getTargetConstant(Value, dl, TCVT); 16269 break; 16270 } 16271 break; 16272 } 16273 } 16274 16275 if (Result.getNode()) { 16276 Ops.push_back(Result); 16277 return; 16278 } 16279 16280 // Handle standard constraint letters. 16281 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 16282 } 16283 16284 // isLegalAddressingMode - Return true if the addressing mode represented 16285 // by AM is legal for this target, for a load/store of the specified type. 16286 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL, 16287 const AddrMode &AM, Type *Ty, 16288 unsigned AS, 16289 Instruction *I) const { 16290 // Vector type r+i form is supported since power9 as DQ form. 
  // We don't check the offset against the DQ-form requirement (off % 16 == 0)
  // here, because on PowerPC the immediate form is preferred, and the offset
  // can be adjusted later to use the immediate form by the
  // PPCLoopInstrFormPrep pass. Also, LSR checks the legality of an addressing
  // mode for an LSRUse using only its min and max offsets, so we should be a
  // little aggressive and accept the other offsets of that LSRUse as well.
  if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    // The link register (return address) is saved in the caller's frame, not
    // the callee's stack frame. So we must get the caller's frame address
    // and load the return address at the LR offset from there.
    SDValue FrameAddr =
        DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                    LowerFRAMEADDR(Op, DAG), MachinePointerInfo());
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be deferred until PEI.
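  // FP and FP8 act as placeholders here: which register actually serves as
  // the frame pointer is not known until PEI has run, at which point they
  // can be rewritten to the real frame pointer or the stack pointer.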
16383 unsigned FrameReg; 16384 if (MF.getFunction().hasFnAttribute(Attribute::Naked)) 16385 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 16386 else 16387 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 16388 16389 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 16390 PtrVT); 16391 while (Depth--) 16392 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 16393 FrameAddr, MachinePointerInfo()); 16394 return FrameAddr; 16395 } 16396 16397 // FIXME? Maybe this could be a TableGen attribute on some registers and 16398 // this table could be generated automatically from RegInfo. 16399 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT, 16400 const MachineFunction &MF) const { 16401 bool isPPC64 = Subtarget.isPPC64(); 16402 16403 bool is64Bit = isPPC64 && VT == LLT::scalar(64); 16404 if (!is64Bit && VT != LLT::scalar(32)) 16405 report_fatal_error("Invalid register global variable type"); 16406 16407 Register Reg = StringSwitch<Register>(RegName) 16408 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 16409 .Case("r2", isPPC64 ? Register() : PPC::R2) 16410 .Case("r13", (is64Bit ? PPC::X13 : PPC::R13)) 16411 .Default(Register()); 16412 16413 if (Reg) 16414 return Reg; 16415 report_fatal_error("Invalid register name global variable"); 16416 } 16417 16418 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const { 16419 // 32-bit SVR4 ABI access everything as got-indirect. 16420 if (Subtarget.is32BitELFABI()) 16421 return true; 16422 16423 // AIX accesses everything indirectly through the TOC, which is similar to 16424 // the GOT. 16425 if (Subtarget.isAIXABI()) 16426 return true; 16427 16428 CodeModel::Model CModel = getTargetMachine().getCodeModel(); 16429 // If it is small or large code model, module locals are accessed 16430 // indirectly by loading their address from .toc/.got. 16431 if (CModel == CodeModel::Small || CModel == CodeModel::Large) 16432 return true; 16433 16434 // JumpTable and BlockAddress are accessed as got-indirect. 16435 if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA)) 16436 return true; 16437 16438 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA)) 16439 return Subtarget.isGVIndirectSymbol(G->getGlobal()); 16440 16441 return false; 16442 } 16443 16444 bool 16445 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 16446 // The PowerPC target isn't yet aware of offsets. 
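  // Returning false keeps any GlobalAddress + offset as an explicit ISD::ADD
  // rather than folding the offset into the address itself.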
16447 return false; 16448 } 16449 16450 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 16451 const CallInst &I, 16452 MachineFunction &MF, 16453 unsigned Intrinsic) const { 16454 switch (Intrinsic) { 16455 case Intrinsic::ppc_atomicrmw_xchg_i128: 16456 case Intrinsic::ppc_atomicrmw_add_i128: 16457 case Intrinsic::ppc_atomicrmw_sub_i128: 16458 case Intrinsic::ppc_atomicrmw_nand_i128: 16459 case Intrinsic::ppc_atomicrmw_and_i128: 16460 case Intrinsic::ppc_atomicrmw_or_i128: 16461 case Intrinsic::ppc_atomicrmw_xor_i128: 16462 case Intrinsic::ppc_cmpxchg_i128: 16463 Info.opc = ISD::INTRINSIC_W_CHAIN; 16464 Info.memVT = MVT::i128; 16465 Info.ptrVal = I.getArgOperand(0); 16466 Info.offset = 0; 16467 Info.align = Align(16); 16468 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore | 16469 MachineMemOperand::MOVolatile; 16470 return true; 16471 case Intrinsic::ppc_atomic_load_i128: 16472 Info.opc = ISD::INTRINSIC_W_CHAIN; 16473 Info.memVT = MVT::i128; 16474 Info.ptrVal = I.getArgOperand(0); 16475 Info.offset = 0; 16476 Info.align = Align(16); 16477 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; 16478 return true; 16479 case Intrinsic::ppc_atomic_store_i128: 16480 Info.opc = ISD::INTRINSIC_VOID; 16481 Info.memVT = MVT::i128; 16482 Info.ptrVal = I.getArgOperand(2); 16483 Info.offset = 0; 16484 Info.align = Align(16); 16485 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; 16486 return true; 16487 case Intrinsic::ppc_altivec_lvx: 16488 case Intrinsic::ppc_altivec_lvxl: 16489 case Intrinsic::ppc_altivec_lvebx: 16490 case Intrinsic::ppc_altivec_lvehx: 16491 case Intrinsic::ppc_altivec_lvewx: 16492 case Intrinsic::ppc_vsx_lxvd2x: 16493 case Intrinsic::ppc_vsx_lxvw4x: 16494 case Intrinsic::ppc_vsx_lxvd2x_be: 16495 case Intrinsic::ppc_vsx_lxvw4x_be: 16496 case Intrinsic::ppc_vsx_lxvl: 16497 case Intrinsic::ppc_vsx_lxvll: { 16498 EVT VT; 16499 switch (Intrinsic) { 16500 case Intrinsic::ppc_altivec_lvebx: 16501 VT = MVT::i8; 16502 break; 16503 case Intrinsic::ppc_altivec_lvehx: 16504 VT = MVT::i16; 16505 break; 16506 case Intrinsic::ppc_altivec_lvewx: 16507 VT = MVT::i32; 16508 break; 16509 case Intrinsic::ppc_vsx_lxvd2x: 16510 case Intrinsic::ppc_vsx_lxvd2x_be: 16511 VT = MVT::v2f64; 16512 break; 16513 default: 16514 VT = MVT::v4i32; 16515 break; 16516 } 16517 16518 Info.opc = ISD::INTRINSIC_W_CHAIN; 16519 Info.memVT = VT; 16520 Info.ptrVal = I.getArgOperand(0); 16521 Info.offset = -VT.getStoreSize()+1; 16522 Info.size = 2*VT.getStoreSize()-1; 16523 Info.align = Align(1); 16524 Info.flags = MachineMemOperand::MOLoad; 16525 return true; 16526 } 16527 case Intrinsic::ppc_altivec_stvx: 16528 case Intrinsic::ppc_altivec_stvxl: 16529 case Intrinsic::ppc_altivec_stvebx: 16530 case Intrinsic::ppc_altivec_stvehx: 16531 case Intrinsic::ppc_altivec_stvewx: 16532 case Intrinsic::ppc_vsx_stxvd2x: 16533 case Intrinsic::ppc_vsx_stxvw4x: 16534 case Intrinsic::ppc_vsx_stxvd2x_be: 16535 case Intrinsic::ppc_vsx_stxvw4x_be: 16536 case Intrinsic::ppc_vsx_stxvl: 16537 case Intrinsic::ppc_vsx_stxvll: { 16538 EVT VT; 16539 switch (Intrinsic) { 16540 case Intrinsic::ppc_altivec_stvebx: 16541 VT = MVT::i8; 16542 break; 16543 case Intrinsic::ppc_altivec_stvehx: 16544 VT = MVT::i16; 16545 break; 16546 case Intrinsic::ppc_altivec_stvewx: 16547 VT = MVT::i32; 16548 break; 16549 case Intrinsic::ppc_vsx_stxvd2x: 16550 case Intrinsic::ppc_vsx_stxvd2x_be: 16551 VT = MVT::v2f64; 16552 break; 16553 default: 16554 VT = MVT::v4i32; 16555 break; 16556 } 16557 
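    // As with the loads above, these stores may ignore the low address bits
    // (e.g. stvx), so report a conservative window of 2*size-1 bytes around
    // the pointer instead of an exact location.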
16558 Info.opc = ISD::INTRINSIC_VOID; 16559 Info.memVT = VT; 16560 Info.ptrVal = I.getArgOperand(1); 16561 Info.offset = -VT.getStoreSize()+1; 16562 Info.size = 2*VT.getStoreSize()-1; 16563 Info.align = Align(1); 16564 Info.flags = MachineMemOperand::MOStore; 16565 return true; 16566 } 16567 default: 16568 break; 16569 } 16570 16571 return false; 16572 } 16573 16574 /// It returns EVT::Other if the type should be determined using generic 16575 /// target-independent logic. 16576 EVT PPCTargetLowering::getOptimalMemOpType( 16577 const MemOp &Op, const AttributeList &FuncAttributes) const { 16578 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) { 16579 // We should use Altivec/VSX loads and stores when available. For unaligned 16580 // addresses, unaligned VSX loads are only fast starting with the P8. 16581 if (Subtarget.hasAltivec() && Op.size() >= 16 && 16582 (Op.isAligned(Align(16)) || 16583 ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 16584 return MVT::v4i32; 16585 } 16586 16587 if (Subtarget.isPPC64()) { 16588 return MVT::i64; 16589 } 16590 16591 return MVT::i32; 16592 } 16593 16594 /// Returns true if it is beneficial to convert a load of a constant 16595 /// to just the constant itself. 16596 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 16597 Type *Ty) const { 16598 assert(Ty->isIntegerTy()); 16599 16600 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 16601 return !(BitSize == 0 || BitSize > 64); 16602 } 16603 16604 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 16605 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 16606 return false; 16607 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 16608 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 16609 return NumBits1 == 64 && NumBits2 == 32; 16610 } 16611 16612 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 16613 if (!VT1.isInteger() || !VT2.isInteger()) 16614 return false; 16615 unsigned NumBits1 = VT1.getSizeInBits(); 16616 unsigned NumBits2 = VT2.getSizeInBits(); 16617 return NumBits1 == 64 && NumBits2 == 32; 16618 } 16619 16620 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 16621 // Generally speaking, zexts are not free, but they are free when they can be 16622 // folded with other operations. 16623 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 16624 EVT MemVT = LD->getMemoryVT(); 16625 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 16626 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 16627 (LD->getExtensionType() == ISD::NON_EXTLOAD || 16628 LD->getExtensionType() == ISD::ZEXTLOAD)) 16629 return true; 16630 } 16631 16632 // FIXME: Add other cases... 16633 // - 32-bit shifts with a zext to i64 16634 // - zext after ctlz, bswap, etc. 16635 // - zext after and by a constant mask 16636 16637 return TargetLowering::isZExtFree(Val, VT2); 16638 } 16639 16640 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const { 16641 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() && 16642 "invalid fpext types"); 16643 // Extending to float128 is not free. 
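  // (f32 -> f64 is free because scalar floats are kept in double format in
  // the FP registers; producing an f128 requires an actual conversion.)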
16644 if (DestVT == MVT::f128) 16645 return false; 16646 return true; 16647 } 16648 16649 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 16650 return isInt<16>(Imm) || isUInt<16>(Imm); 16651 } 16652 16653 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 16654 return isInt<16>(Imm) || isUInt<16>(Imm); 16655 } 16656 16657 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align, 16658 MachineMemOperand::Flags, 16659 bool *Fast) const { 16660 if (DisablePPCUnaligned) 16661 return false; 16662 16663 // PowerPC supports unaligned memory access for simple non-vector types. 16664 // Although accessing unaligned addresses is not as efficient as accessing 16665 // aligned addresses, it is generally more efficient than manual expansion, 16666 // and generally only traps for software emulation when crossing page 16667 // boundaries. 16668 16669 if (!VT.isSimple()) 16670 return false; 16671 16672 if (VT.isFloatingPoint() && !VT.isVector() && 16673 !Subtarget.allowsUnalignedFPAccess()) 16674 return false; 16675 16676 if (VT.getSimpleVT().isVector()) { 16677 if (Subtarget.hasVSX()) { 16678 if (VT != MVT::v2f64 && VT != MVT::v2i64 && 16679 VT != MVT::v4f32 && VT != MVT::v4i32) 16680 return false; 16681 } else { 16682 return false; 16683 } 16684 } 16685 16686 if (VT == MVT::ppcf128) 16687 return false; 16688 16689 if (Fast) 16690 *Fast = true; 16691 16692 return true; 16693 } 16694 16695 bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, 16696 SDValue C) const { 16697 // Check integral scalar types. 16698 if (!VT.isScalarInteger()) 16699 return false; 16700 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) { 16701 if (!ConstNode->getAPIntValue().isSignedIntN(64)) 16702 return false; 16703 // This transformation will generate >= 2 operations. But the following 16704 // cases will generate <= 2 instructions during ISEL. So exclude them. 16705 // 1. If the constant multiplier fits 16 bits, it can be handled by one 16706 // HW instruction, ie. MULLI 16707 // 2. If the multiplier after shifted fits 16 bits, an extra shift 16708 // instruction is needed than case 1, ie. MULLI and RLDICR 16709 int64_t Imm = ConstNode->getSExtValue(); 16710 unsigned Shift = countTrailingZeros<uint64_t>(Imm); 16711 Imm >>= Shift; 16712 if (isInt<16>(Imm)) 16713 return false; 16714 uint64_t UImm = static_cast<uint64_t>(Imm); 16715 if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) || 16716 isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm)) 16717 return true; 16718 } 16719 return false; 16720 } 16721 16722 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, 16723 EVT VT) const { 16724 return isFMAFasterThanFMulAndFAdd( 16725 MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext())); 16726 } 16727 16728 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F, 16729 Type *Ty) const { 16730 if (Subtarget.hasSPE()) 16731 return false; 16732 switch (Ty->getScalarType()->getTypeID()) { 16733 case Type::FloatTyID: 16734 case Type::DoubleTyID: 16735 return true; 16736 case Type::FP128TyID: 16737 return Subtarget.hasP9Vector(); 16738 default: 16739 return false; 16740 } 16741 } 16742 16743 // FIXME: add more patterns which are not profitable to hoist. 
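// Returning false keeps the instruction next to its single user so that the
// patterns matched below (FMA formation, a float load feeding a store) are
// not broken up; returning true permits normal hoisting.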
16744 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const { 16745 if (!I->hasOneUse()) 16746 return true; 16747 16748 Instruction *User = I->user_back(); 16749 assert(User && "A single use instruction with no uses."); 16750 16751 switch (I->getOpcode()) { 16752 case Instruction::FMul: { 16753 // Don't break FMA, PowerPC prefers FMA. 16754 if (User->getOpcode() != Instruction::FSub && 16755 User->getOpcode() != Instruction::FAdd) 16756 return true; 16757 16758 const TargetOptions &Options = getTargetMachine().Options; 16759 const Function *F = I->getFunction(); 16760 const DataLayout &DL = F->getParent()->getDataLayout(); 16761 Type *Ty = User->getOperand(0)->getType(); 16762 16763 return !( 16764 isFMAFasterThanFMulAndFAdd(*F, Ty) && 16765 isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) && 16766 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath)); 16767 } 16768 case Instruction::Load: { 16769 // Don't break "store (load float*)" pattern, this pattern will be combined 16770 // to "store (load int32)" in later InstCombine pass. See function 16771 // combineLoadToOperationType. On PowerPC, loading a float point takes more 16772 // cycles than loading a 32 bit integer. 16773 LoadInst *LI = cast<LoadInst>(I); 16774 // For the loads that combineLoadToOperationType does nothing, like 16775 // ordered load, it should be profitable to hoist them. 16776 // For swifterror load, it can only be used for pointer to pointer type, so 16777 // later type check should get rid of this case. 16778 if (!LI->isUnordered()) 16779 return true; 16780 16781 if (User->getOpcode() != Instruction::Store) 16782 return true; 16783 16784 if (I->getType()->getTypeID() != Type::FloatTyID) 16785 return true; 16786 16787 return false; 16788 } 16789 default: 16790 return true; 16791 } 16792 return true; 16793 } 16794 16795 const MCPhysReg * 16796 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const { 16797 // LR is a callee-save register, but we must treat it as clobbered by any call 16798 // site. Hence we include LR in the scratch registers, which are in turn added 16799 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies 16800 // to CTR, which is used by any indirect call. 16801 static const MCPhysReg ScratchRegs[] = { 16802 PPC::X12, PPC::LR8, PPC::CTR8, 0 16803 }; 16804 16805 return ScratchRegs; 16806 } 16807 16808 Register PPCTargetLowering::getExceptionPointerRegister( 16809 const Constant *PersonalityFn) const { 16810 return Subtarget.isPPC64() ? PPC::X3 : PPC::R3; 16811 } 16812 16813 Register PPCTargetLowering::getExceptionSelectorRegister( 16814 const Constant *PersonalityFn) const { 16815 return Subtarget.isPPC64() ? PPC::X4 : PPC::R4; 16816 } 16817 16818 bool 16819 PPCTargetLowering::shouldExpandBuildVectorWithShuffles( 16820 EVT VT , unsigned DefinedValues) const { 16821 if (VT == MVT::v2i64) 16822 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves 16823 16824 if (Subtarget.hasVSX()) 16825 return true; 16826 16827 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues); 16828 } 16829 16830 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 16831 if (DisableILPPref || Subtarget.enableMachineScheduler()) 16832 return TargetLowering::getSchedulingPreference(N); 16833 16834 return Sched::ILP; 16835 } 16836 16837 // Create a fast isel object. 
16838 FastISel * 16839 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 16840 const TargetLibraryInfo *LibInfo) const { 16841 return PPC::createFastISel(FuncInfo, LibInfo); 16842 } 16843 16844 // 'Inverted' means the FMA opcode after negating one multiplicand. 16845 // For example, (fma -a b c) = (fnmsub a b c) 16846 static unsigned invertFMAOpcode(unsigned Opc) { 16847 switch (Opc) { 16848 default: 16849 llvm_unreachable("Invalid FMA opcode for PowerPC!"); 16850 case ISD::FMA: 16851 return PPCISD::FNMSUB; 16852 case PPCISD::FNMSUB: 16853 return ISD::FMA; 16854 } 16855 } 16856 16857 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, 16858 bool LegalOps, bool OptForSize, 16859 NegatibleCost &Cost, 16860 unsigned Depth) const { 16861 if (Depth > SelectionDAG::MaxRecursionDepth) 16862 return SDValue(); 16863 16864 unsigned Opc = Op.getOpcode(); 16865 EVT VT = Op.getValueType(); 16866 SDNodeFlags Flags = Op.getNode()->getFlags(); 16867 16868 switch (Opc) { 16869 case PPCISD::FNMSUB: 16870 if (!Op.hasOneUse() || !isTypeLegal(VT)) 16871 break; 16872 16873 const TargetOptions &Options = getTargetMachine().Options; 16874 SDValue N0 = Op.getOperand(0); 16875 SDValue N1 = Op.getOperand(1); 16876 SDValue N2 = Op.getOperand(2); 16877 SDLoc Loc(Op); 16878 16879 NegatibleCost N2Cost = NegatibleCost::Expensive; 16880 SDValue NegN2 = 16881 getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1); 16882 16883 if (!NegN2) 16884 return SDValue(); 16885 16886 // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c)) 16887 // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c)) 16888 // These transformations may change sign of zeroes. For example, 16889 // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1. 16890 if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) { 16891 // Try and choose the cheaper one to negate. 16892 NegatibleCost N0Cost = NegatibleCost::Expensive; 16893 SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize, 16894 N0Cost, Depth + 1); 16895 16896 NegatibleCost N1Cost = NegatibleCost::Expensive; 16897 SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize, 16898 N1Cost, Depth + 1); 16899 16900 if (NegN0 && N0Cost <= N1Cost) { 16901 Cost = std::min(N0Cost, N2Cost); 16902 return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags); 16903 } else if (NegN1) { 16904 Cost = std::min(N1Cost, N2Cost); 16905 return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags); 16906 } 16907 } 16908 16909 // (fneg (fnmsub a b c)) => (fma a b (fneg c)) 16910 if (isOperationLegal(ISD::FMA, VT)) { 16911 Cost = N2Cost; 16912 return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags); 16913 } 16914 16915 break; 16916 } 16917 16918 return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize, 16919 Cost, Depth); 16920 } 16921 16922 // Override to enable LOAD_STACK_GUARD lowering on Linux. 16923 bool PPCTargetLowering::useLoadStackGuardNode() const { 16924 if (!Subtarget.isTargetLinux()) 16925 return TargetLowering::useLoadStackGuardNode(); 16926 return true; 16927 } 16928 16929 // Override to disable global variable loading on Linux and insert AIX canary 16930 // word declaration. 
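// On Linux nothing needs to be declared, since useLoadStackGuardNode() above
// arranges for the canary to be loaded without going through a global; on
// AIX a module-level canary word named by AIXSSPCanaryWordName is created on
// demand.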
16931 void PPCTargetLowering::insertSSPDeclarations(Module &M) const { 16932 if (Subtarget.isAIXABI()) { 16933 M.getOrInsertGlobal(AIXSSPCanaryWordName, 16934 Type::getInt8PtrTy(M.getContext())); 16935 return; 16936 } 16937 if (!Subtarget.isTargetLinux()) 16938 return TargetLowering::insertSSPDeclarations(M); 16939 } 16940 16941 Value *PPCTargetLowering::getSDagStackGuard(const Module &M) const { 16942 if (Subtarget.isAIXABI()) 16943 return M.getGlobalVariable(AIXSSPCanaryWordName); 16944 return TargetLowering::getSDagStackGuard(M); 16945 } 16946 16947 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, 16948 bool ForCodeSize) const { 16949 if (!VT.isSimple() || !Subtarget.hasVSX()) 16950 return false; 16951 16952 switch(VT.getSimpleVT().SimpleTy) { 16953 default: 16954 // For FP types that are currently not supported by PPC backend, return 16955 // false. Examples: f16, f80. 16956 return false; 16957 case MVT::f32: 16958 case MVT::f64: 16959 if (Subtarget.hasPrefixInstrs()) { 16960 // we can materialize all immediatess via XXSPLTI32DX and XXSPLTIDP. 16961 return true; 16962 } 16963 LLVM_FALLTHROUGH; 16964 case MVT::ppcf128: 16965 return Imm.isPosZero(); 16966 } 16967 } 16968 16969 // For vector shift operation op, fold 16970 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y) 16971 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N, 16972 SelectionDAG &DAG) { 16973 SDValue N0 = N->getOperand(0); 16974 SDValue N1 = N->getOperand(1); 16975 EVT VT = N0.getValueType(); 16976 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 16977 unsigned Opcode = N->getOpcode(); 16978 unsigned TargetOpcode; 16979 16980 switch (Opcode) { 16981 default: 16982 llvm_unreachable("Unexpected shift operation"); 16983 case ISD::SHL: 16984 TargetOpcode = PPCISD::SHL; 16985 break; 16986 case ISD::SRL: 16987 TargetOpcode = PPCISD::SRL; 16988 break; 16989 case ISD::SRA: 16990 TargetOpcode = PPCISD::SRA; 16991 break; 16992 } 16993 16994 if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) && 16995 N1->getOpcode() == ISD::AND) 16996 if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1))) 16997 if (Mask->getZExtValue() == OpSizeInBits - 1) 16998 return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0)); 16999 17000 return SDValue(); 17001 } 17002 17003 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const { 17004 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 17005 return Value; 17006 17007 SDValue N0 = N->getOperand(0); 17008 ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 17009 if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() || 17010 N0.getOpcode() != ISD::SIGN_EXTEND || 17011 N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr || 17012 N->getValueType(0) != MVT::i64) 17013 return SDValue(); 17014 17015 // We can't save an operation here if the value is already extended, and 17016 // the existing shift is easier to combine. 17017 SDValue ExtsSrc = N0.getOperand(0); 17018 if (ExtsSrc.getOpcode() == ISD::TRUNCATE && 17019 ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext) 17020 return SDValue(); 17021 17022 SDLoc DL(N0); 17023 SDValue ShiftBy = SDValue(CN1, 0); 17024 // We want the shift amount to be i32 on the extswli, but the shift could 17025 // have an i64. 
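  // For example (illustrative), (shl (sign_extend i32 x), 4) becomes a
  // single EXTSWSLI x, 4 instead of a separate extend followed by a shift.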
17026 if (ShiftBy.getValueType() == MVT::i64) 17027 ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32); 17028 17029 return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0), 17030 ShiftBy); 17031 } 17032 17033 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const { 17034 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 17035 return Value; 17036 17037 return SDValue(); 17038 } 17039 17040 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const { 17041 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 17042 return Value; 17043 17044 return SDValue(); 17045 } 17046 17047 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1)) 17048 // Transform (add X, (zext(sete Z, C))) -> (addze X, (subfic (addi Z, -C), 0)) 17049 // When C is zero, the equation (addi Z, -C) can be simplified to Z 17050 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types 17051 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG, 17052 const PPCSubtarget &Subtarget) { 17053 if (!Subtarget.isPPC64()) 17054 return SDValue(); 17055 17056 SDValue LHS = N->getOperand(0); 17057 SDValue RHS = N->getOperand(1); 17058 17059 auto isZextOfCompareWithConstant = [](SDValue Op) { 17060 if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() || 17061 Op.getValueType() != MVT::i64) 17062 return false; 17063 17064 SDValue Cmp = Op.getOperand(0); 17065 if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() || 17066 Cmp.getOperand(0).getValueType() != MVT::i64) 17067 return false; 17068 17069 if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) { 17070 int64_t NegConstant = 0 - Constant->getSExtValue(); 17071 // Due to the limitations of the addi instruction, 17072 // -C is required to be [-32768, 32767]. 17073 return isInt<16>(NegConstant); 17074 } 17075 17076 return false; 17077 }; 17078 17079 bool LHSHasPattern = isZextOfCompareWithConstant(LHS); 17080 bool RHSHasPattern = isZextOfCompareWithConstant(RHS); 17081 17082 // If there is a pattern, canonicalize a zext operand to the RHS. 17083 if (LHSHasPattern && !RHSHasPattern) 17084 std::swap(LHS, RHS); 17085 else if (!LHSHasPattern && !RHSHasPattern) 17086 return SDValue(); 17087 17088 SDLoc DL(N); 17089 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue); 17090 SDValue Cmp = RHS.getOperand(0); 17091 SDValue Z = Cmp.getOperand(0); 17092 auto *Constant = cast<ConstantSDNode>(Cmp.getOperand(1)); 17093 int64_t NegConstant = 0 - Constant->getSExtValue(); 17094 17095 switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) { 17096 default: break; 17097 case ISD::SETNE: { 17098 // when C == 0 17099 // --> addze X, (addic Z, -1).carry 17100 // / 17101 // add X, (zext(setne Z, C))-- 17102 // \ when -32768 <= -C <= 32767 && C != 0 17103 // --> addze X, (addic (addi Z, -C), -1).carry 17104 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z, 17105 DAG.getConstant(NegConstant, DL, MVT::i64)); 17106 SDValue AddOrZ = NegConstant != 0 ? 
Add : Z; 17107 SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue), 17108 AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64)); 17109 return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64), 17110 SDValue(Addc.getNode(), 1)); 17111 } 17112 case ISD::SETEQ: { 17113 // when C == 0 17114 // --> addze X, (subfic Z, 0).carry 17115 // / 17116 // add X, (zext(sete Z, C))-- 17117 // \ when -32768 <= -C <= 32767 && C != 0 17118 // --> addze X, (subfic (addi Z, -C), 0).carry 17119 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z, 17120 DAG.getConstant(NegConstant, DL, MVT::i64)); 17121 SDValue AddOrZ = NegConstant != 0 ? Add : Z; 17122 SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue), 17123 DAG.getConstant(0, DL, MVT::i64), AddOrZ); 17124 return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64), 17125 SDValue(Subc.getNode(), 1)); 17126 } 17127 } 17128 17129 return SDValue(); 17130 } 17131 17132 // Transform 17133 // (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to 17134 // (MAT_PCREL_ADDR GlobalAddr+(C1+C2)) 17135 // In this case both C1 and C2 must be known constants. 17136 // C1+C2 must fit into a 34 bit signed integer. 17137 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG, 17138 const PPCSubtarget &Subtarget) { 17139 if (!Subtarget.isUsingPCRelativeCalls()) 17140 return SDValue(); 17141 17142 // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node. 17143 // If we find that node try to cast the Global Address and the Constant. 17144 SDValue LHS = N->getOperand(0); 17145 SDValue RHS = N->getOperand(1); 17146 17147 if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR) 17148 std::swap(LHS, RHS); 17149 17150 if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR) 17151 return SDValue(); 17152 17153 // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node. 17154 GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0)); 17155 ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS); 17156 17157 // Check that both casts succeeded. 17158 if (!GSDN || !ConstNode) 17159 return SDValue(); 17160 17161 int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue(); 17162 SDLoc DL(GSDN); 17163 17164 // The signed int offset needs to fit in 34 bits. 17165 if (!isInt<34>(NewOffset)) 17166 return SDValue(); 17167 17168 // The new global address is a copy of the old global address except 17169 // that it has the updated Offset. 17170 SDValue GA = 17171 DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0), 17172 NewOffset, GSDN->getTargetFlags()); 17173 SDValue MatPCRel = 17174 DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA); 17175 return MatPCRel; 17176 } 17177 17178 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const { 17179 if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget)) 17180 return Value; 17181 17182 if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget)) 17183 return Value; 17184 17185 return SDValue(); 17186 } 17187 17188 // Detect TRUNCATE operations on bitcasts of float128 values. 17189 // What we are looking for here is the situtation where we extract a subset 17190 // of bits from a 128 bit float. 
17191 // This can be of two forms: 17192 // 1) BITCAST of f128 feeding TRUNCATE 17193 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE 17194 // The reason this is required is because we do not have a legal i128 type 17195 // and so we want to prevent having to store the f128 and then reload part 17196 // of it. 17197 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N, 17198 DAGCombinerInfo &DCI) const { 17199 // If we are using CRBits then try that first. 17200 if (Subtarget.useCRBits()) { 17201 // Check if CRBits did anything and return that if it did. 17202 if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI)) 17203 return CRTruncValue; 17204 } 17205 17206 SDLoc dl(N); 17207 SDValue Op0 = N->getOperand(0); 17208 17209 // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b) 17210 if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) { 17211 EVT VT = N->getValueType(0); 17212 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8) 17213 return SDValue(); 17214 SDValue Sub = Op0.getOperand(0); 17215 if (Sub.getOpcode() == ISD::SUB) { 17216 SDValue SubOp0 = Sub.getOperand(0); 17217 SDValue SubOp1 = Sub.getOperand(1); 17218 if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) && 17219 (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) { 17220 return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0), 17221 SubOp1.getOperand(0), 17222 DCI.DAG.getTargetConstant(0, dl, MVT::i32)); 17223 } 17224 } 17225 } 17226 17227 // Looking for a truncate of i128 to i64. 17228 if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64) 17229 return SDValue(); 17230 17231 int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0; 17232 17233 // SRL feeding TRUNCATE. 17234 if (Op0.getOpcode() == ISD::SRL) { 17235 ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 17236 // The right shift has to be by 64 bits. 17237 if (!ConstNode || ConstNode->getZExtValue() != 64) 17238 return SDValue(); 17239 17240 // Switch the element number to extract. 17241 EltToExtract = EltToExtract ? 0 : 1; 17242 // Update Op0 past the SRL. 17243 Op0 = Op0.getOperand(0); 17244 } 17245 17246 // BITCAST feeding a TRUNCATE possibly via SRL. 17247 if (Op0.getOpcode() == ISD::BITCAST && 17248 Op0.getValueType() == MVT::i128 && 17249 Op0.getOperand(0).getValueType() == MVT::f128) { 17250 SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0)); 17251 return DCI.DAG.getNode( 17252 ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast, 17253 DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32)); 17254 } 17255 return SDValue(); 17256 } 17257 17258 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const { 17259 SelectionDAG &DAG = DCI.DAG; 17260 17261 ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1)); 17262 if (!ConstOpOrElement) 17263 return SDValue(); 17264 17265 // An imul is usually smaller than the alternative sequence for legal type. 
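  // A sketch of the rewrites performed below for a constant multiplier:
  //   (mul x, 9)  -> (add (shl x, 3), x)    (9 = 2^3 + 1)
  //   (mul x, -7) -> (sub x, (shl x, 3))    (7 = 2^3 - 1)
  // Constants that are not 2^N +/- 1 (e.g. 6) are left for generic lowering.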
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {
    default:
      // TODO: enhance the condition for subtargets before pwr8.
      return false;
    case PPC::DIR_PWR8:
      //  type        mul     add    shl
      // scalar        4       1      1
      // vector        7       2      2
      return true;
    case PPC::DIR_PWR9:
    case PPC::DIR_PWR10:
    case PPC::DIR_PWR_FUTURE:
      //  type        mul     add    shl
      // scalar        5       2      2
      // vector        7       2      2
      //
      // The cycle ratios of the relevant operations are shown in the tables
      // above: mul costs 5 (scalar) / 7 (vector) cycles, while add/sub/shl
      // all cost 2 for both scalar and vector types. The two-instruction
      // patterns (add/sub + shl) cost 4 cycles, so they are always
      // profitable. The three-instruction pattern
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x) costs 6 cycles
      // (sub + add + shl), so it is only profitable for vector types.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);

  } else {
    return SDValue();
  }
}

// Combine an FMA-like op (such as fnmsub) with fnegs into the appropriate op.
// Do this in the combiner since we need to check SD flags and other subtarget
// features.
SDValue PPCTargetLowering::combineFMALike(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  SDNodeFlags Flags = N->getFlags();
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  const TargetOptions &Options = getTargetMachine().Options;
  unsigned Opc = N->getOpcode();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOps = !DCI.isBeforeLegalizeOps();
  SDLoc Loc(N);

  if (!isOperationLegal(ISD::FMA, VT))
    return SDValue();

  // Allowing the transformation to FNMSUB may change the sign of zeroes when
  // ab-c=0, since (fnmsub a b c)=-0 while c-ab=+0.
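  // Concretely, fnmsub computes -(a*b - c): with a = b = c = 1 it yields
  // -0.0 while the unfused c - a*b yields +0.0, which is why the
  // no-signed-zeros conditions are required below.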
17364 if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath) 17365 return SDValue(); 17366 17367 // (fma (fneg a) b c) => (fnmsub a b c) 17368 // (fnmsub (fneg a) b c) => (fma a b c) 17369 if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize)) 17370 return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags); 17371 17372 // (fma a (fneg b) c) => (fnmsub a b c) 17373 // (fnmsub a (fneg b) c) => (fma a b c) 17374 if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize)) 17375 return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags); 17376 17377 return SDValue(); 17378 } 17379 17380 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 17381 // Only duplicate to increase tail-calls for the 64bit SysV ABIs. 17382 if (!Subtarget.is64BitELFABI()) 17383 return false; 17384 17385 // If not a tail call then no need to proceed. 17386 if (!CI->isTailCall()) 17387 return false; 17388 17389 // If sibling calls have been disabled and tail-calls aren't guaranteed 17390 // there is no reason to duplicate. 17391 auto &TM = getTargetMachine(); 17392 if (!TM.Options.GuaranteedTailCallOpt && DisableSCO) 17393 return false; 17394 17395 // Can't tail call a function called indirectly, or if it has variadic args. 17396 const Function *Callee = CI->getCalledFunction(); 17397 if (!Callee || Callee->isVarArg()) 17398 return false; 17399 17400 // Make sure the callee and caller calling conventions are eligible for tco. 17401 const Function *Caller = CI->getParent()->getParent(); 17402 if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(), 17403 CI->getCallingConv())) 17404 return false; 17405 17406 // If the function is local then we have a good chance at tail-calling it 17407 return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee); 17408 } 17409 17410 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const { 17411 if (!Subtarget.hasVSX()) 17412 return false; 17413 if (Subtarget.hasP9Vector() && VT == MVT::f128) 17414 return true; 17415 return VT == MVT::f32 || VT == MVT::f64 || 17416 VT == MVT::v4f32 || VT == MVT::v2f64; 17417 } 17418 17419 bool PPCTargetLowering:: 17420 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const { 17421 const Value *Mask = AndI.getOperand(1); 17422 // If the mask is suitable for andi. or andis. we should sink the and. 17423 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) { 17424 // Can't handle constants wider than 64-bits. 17425 if (CI->getBitWidth() > 64) 17426 return false; 17427 int64_t ConstVal = CI->getZExtValue(); 17428 return isUInt<16>(ConstVal) || 17429 (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF)); 17430 } 17431 17432 // For non-constant masks, we can always use the record-form and. 
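  // (The record form "and." updates CR0 as a by-product, so the compare
  // against zero comes for free.)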
17433 return true; 17434 } 17435 17436 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0) 17437 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0) 17438 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0) 17439 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0) 17440 // Transform (abs (sub a, b) to (vabsd a b 1)) if a & b of type v4i32 17441 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const { 17442 assert((N->getOpcode() == ISD::ABS) && "Need ABS node here"); 17443 assert(Subtarget.hasP9Altivec() && 17444 "Only combine this when P9 altivec supported!"); 17445 EVT VT = N->getValueType(0); 17446 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8) 17447 return SDValue(); 17448 17449 SelectionDAG &DAG = DCI.DAG; 17450 SDLoc dl(N); 17451 if (N->getOperand(0).getOpcode() == ISD::SUB) { 17452 // Even for signed integers, if it's known to be positive (as signed 17453 // integer) due to zero-extended inputs. 17454 unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode(); 17455 unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode(); 17456 if ((SubOpcd0 == ISD::ZERO_EXTEND || 17457 SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) && 17458 (SubOpcd1 == ISD::ZERO_EXTEND || 17459 SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) { 17460 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(), 17461 N->getOperand(0)->getOperand(0), 17462 N->getOperand(0)->getOperand(1), 17463 DAG.getTargetConstant(0, dl, MVT::i32)); 17464 } 17465 17466 // For type v4i32, it can be optimized with xvnegsp + vabsduw 17467 if (N->getOperand(0).getValueType() == MVT::v4i32 && 17468 N->getOperand(0).hasOneUse()) { 17469 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(), 17470 N->getOperand(0)->getOperand(0), 17471 N->getOperand(0)->getOperand(1), 17472 DAG.getTargetConstant(1, dl, MVT::i32)); 17473 } 17474 } 17475 17476 return SDValue(); 17477 } 17478 17479 // For type v4i32/v8ii16/v16i8, transform 17480 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b) 17481 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b) 17482 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b) 17483 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b) 17484 SDValue PPCTargetLowering::combineVSelect(SDNode *N, 17485 DAGCombinerInfo &DCI) const { 17486 assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here"); 17487 assert(Subtarget.hasP9Altivec() && 17488 "Only combine this when P9 altivec supported!"); 17489 17490 SelectionDAG &DAG = DCI.DAG; 17491 SDLoc dl(N); 17492 SDValue Cond = N->getOperand(0); 17493 SDValue TrueOpnd = N->getOperand(1); 17494 SDValue FalseOpnd = N->getOperand(2); 17495 EVT VT = N->getOperand(1).getValueType(); 17496 17497 if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB || 17498 FalseOpnd.getOpcode() != ISD::SUB) 17499 return SDValue(); 17500 17501 // ABSD only available for type v4i32/v8i16/v16i8 17502 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8) 17503 return SDValue(); 17504 17505 // At least to save one more dependent computation 17506 if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse())) 17507 return SDValue(); 17508 17509 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 17510 17511 // Can only handle unsigned comparison here 17512 switch (CC) { 17513 default: 17514 return SDValue(); 17515 case 
// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 Altivec is supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD is only available for type v4i32/v8i16/v16i8.
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // Only combine when at least one of the operands has a single use, so
  // that the rewrite saves at least one dependent computation.
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // We can only handle unsigned comparisons here.
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}

/// getAddrModeForFlags - Based on the set of address flags, select the
/// optimal instruction format to match by.
PPC::AddrMode PPCTargetLowering::getAddrModeForFlags(unsigned Flags) const {
  // This is not a node we should be handling here.
  if (Flags == PPC::MOF_None)
    return PPC::AM_None;
  // Unaligned D-Forms are tried first, followed by the aligned D-Forms.
  for (auto FlagSet : AddrModesMap.at(PPC::AM_DForm))
    if ((Flags & FlagSet) == FlagSet)
      return PPC::AM_DForm;
  for (auto FlagSet : AddrModesMap.at(PPC::AM_DSForm))
    if ((Flags & FlagSet) == FlagSet)
      return PPC::AM_DSForm;
  for (auto FlagSet : AddrModesMap.at(PPC::AM_DQForm))
    if ((Flags & FlagSet) == FlagSet)
      return PPC::AM_DQForm;
  for (auto FlagSet : AddrModesMap.at(PPC::AM_PrefixDForm))
    if ((Flags & FlagSet) == FlagSet)
      return PPC::AM_PrefixDForm;
  // If no other forms are selected, return an X-Form as it is the most
  // general addressing mode.
  return PPC::AM_XForm;
}
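// Note on the matching scheme above (illustrative, editorial): AddrModesMap
// associates each addressing mode with the flag sets that may select it, and
// a mode is chosen as soon as one of its flag sets is fully contained in the
// computed flags. For instance, a word-sized zero-extending load whose
// address is a register plus a signed 16-bit immediate would carry flags
// along the lines of (MOF_WordInt | MOF_ZExt | MOF_RPlusSImm16 | subtarget
// flags) and match a D-Form entry before the X-Form fallback is reached.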
/// Set alignment flags based on whether or not the Frame Index is aligned.
/// Utilized when computing flags for address computation when selecting
/// load and store instructions.
static void setAlignFlagsForFI(SDValue N, unsigned &FlagSet,
                               SelectionDAG &DAG) {
  bool IsAdd = ((N.getOpcode() == ISD::ADD) || (N.getOpcode() == ISD::OR));
  FrameIndexSDNode *FI =
      dyn_cast<FrameIndexSDNode>(IsAdd ? N.getOperand(0) : N);
  if (!FI)
    return;
  const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  unsigned FrameIndexAlign = MFI.getObjectAlign(FI->getIndex()).value();
  // If this is (add $FI, $S16Imm), the alignment flags are already set
  // based on the immediate. We just need to clear the alignment flags
  // if the FI alignment is weaker.
  if ((FrameIndexAlign % 4) != 0)
    FlagSet &= ~PPC::MOF_RPlusSImm16Mult4;
  if ((FrameIndexAlign % 16) != 0)
    FlagSet &= ~PPC::MOF_RPlusSImm16Mult16;
  // If the address is a plain FrameIndex, set alignment flags based on
  // FI alignment.
  if (!IsAdd) {
    if ((FrameIndexAlign % 4) == 0)
      FlagSet |= PPC::MOF_RPlusSImm16Mult4;
    if ((FrameIndexAlign % 16) == 0)
      FlagSet |= PPC::MOF_RPlusSImm16Mult16;
  }
}

/// Given a node, compute flags that are used for address computation when
/// selecting load and store instructions. The flags computed are stored in
/// FlagSet. This function takes into account whether the node is a constant,
/// an ADD or OR, or neither, and computes the address flags accordingly.
static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet,
                                              SelectionDAG &DAG) {
  // Set the alignment flags for the node depending on whether it is
  // 4-byte or 16-byte aligned.
  auto SetAlignFlagsForImm = [&](uint64_t Imm) {
    if ((Imm & 0x3) == 0)
      FlagSet |= PPC::MOF_RPlusSImm16Mult4;
    if ((Imm & 0xf) == 0)
      FlagSet |= PPC::MOF_RPlusSImm16Mult16;
  };

  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // All 32-bit constants can be computed as LIS + Disp.
    const APInt &ConstImm = CN->getAPIntValue();
    if (ConstImm.isSignedIntN(32)) { // Flag to handle 32-bit constants.
      FlagSet |= PPC::MOF_AddrIsSImm32;
      SetAlignFlagsForImm(ConstImm.getZExtValue());
      setAlignFlagsForFI(N, FlagSet, DAG);
    }
    if (ConstImm.isSignedIntN(34)) // Flag to handle 34-bit constants.
      FlagSet |= PPC::MOF_RPlusSImm34;
    else // Let constant materialization handle large constants.
      FlagSet |= PPC::MOF_NotAddNorCst;
  } else if (N.getOpcode() == ISD::ADD || provablyDisjointOr(DAG, N)) {
    // This address can be represented as an addition of:
    // - Register + Imm16 (possibly a multiple of 4/16)
    // - Register + Imm34
    // - Register + PPCISD::Lo
    // - Register + Register
    // In any case, we won't have to match this as Base + Zero.
    SDValue RHS = N.getOperand(1);
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
      const APInt &ConstImm = CN->getAPIntValue();
      if (ConstImm.isSignedIntN(16)) {
        FlagSet |= PPC::MOF_RPlusSImm16; // Signed 16-bit immediates.
        SetAlignFlagsForImm(ConstImm.getZExtValue());
        setAlignFlagsForFI(N, FlagSet, DAG);
      }
      if (ConstImm.isSignedIntN(34))
        FlagSet |= PPC::MOF_RPlusSImm34; // Signed 34-bit immediates.
      else
        FlagSet |= PPC::MOF_RPlusR; // Register.
    } else if (RHS.getOpcode() == PPCISD::Lo &&
               !cast<ConstantSDNode>(RHS.getOperand(1))->getZExtValue())
      FlagSet |= PPC::MOF_RPlusLo; // PPCISD::Lo.
    else
      FlagSet |= PPC::MOF_RPlusR;
  } else { // The address computation is not a constant or an addition.
    setAlignFlagsForFI(N, FlagSet, DAG);
    FlagSet |= PPC::MOF_NotAddNorCst;
  }
}
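// Worked example for the flag computation above (illustrative values): for
// an address of the form (add $FI, 48), the immediate path sets
// MOF_RPlusSImm16 plus MOF_RPlusSImm16Mult4 and MOF_RPlusSImm16Mult16,
// since 48 is a multiple of both 4 and 16; setAlignFlagsForFI then clears
// the Mult4/Mult16 bits again if the frame object itself is not known to be
// at least that aligned.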
static bool isPCRelNode(SDValue N) {
  return (N.getOpcode() == PPCISD::MAT_PCREL_ADDR ||
          isValidPCRelNode<ConstantPoolSDNode>(N) ||
          isValidPCRelNode<GlobalAddressSDNode>(N) ||
          isValidPCRelNode<JumpTableSDNode>(N) ||
          isValidPCRelNode<BlockAddressSDNode>(N));
}

/// computeMOFlags - Given a node N and its Parent (a MemSDNode), compute
/// the address flags of the load/store instruction that is to be matched.
unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N,
                                           SelectionDAG &DAG) const {
  unsigned FlagSet = PPC::MOF_None;

  // Compute subtarget flags.
  if (!Subtarget.hasP9Vector())
    FlagSet |= PPC::MOF_SubtargetBeforeP9;
  else {
    FlagSet |= PPC::MOF_SubtargetP9;
    if (Subtarget.hasPrefixInstrs())
      FlagSet |= PPC::MOF_SubtargetP10;
  }
  if (Subtarget.hasSPE())
    FlagSet |= PPC::MOF_SubtargetSPE;

  // Check if we have a PCRel node and return early.
  if ((FlagSet & PPC::MOF_SubtargetP10) && isPCRelNode(N))
    return FlagSet;

  // If the node is one of the paired load/store intrinsics, compute flags
  // for the address computation and return early.
  unsigned ParentOp = Parent->getOpcode();
  if (Subtarget.isISA3_1() && ((ParentOp == ISD::INTRINSIC_W_CHAIN) ||
                               (ParentOp == ISD::INTRINSIC_VOID))) {
    unsigned ID = cast<ConstantSDNode>(Parent->getOperand(1))->getZExtValue();
    if ((ID == Intrinsic::ppc_vsx_lxvp) || (ID == Intrinsic::ppc_vsx_stxvp)) {
      SDValue IntrinOp = (ID == Intrinsic::ppc_vsx_lxvp)
                             ? Parent->getOperand(2)
                             : Parent->getOperand(3);
      computeFlagsForAddressComputation(IntrinOp, FlagSet, DAG);
      FlagSet |= PPC::MOF_Vector;
      return FlagSet;
    }
  }

  // Mark this as something we don't want to handle here if it is an atomic
  // or a pre-increment instruction.
  if (const LSBaseSDNode *LSB = dyn_cast<LSBaseSDNode>(Parent))
    if (LSB->isIndexed())
      return PPC::MOF_None;

  // Compute in-memory type flags, based on whether the access is a scalar
  // integer, a float, or a vector.
  const MemSDNode *MN = dyn_cast<MemSDNode>(Parent);
  assert(MN && "Parent should be a MemSDNode!");
  EVT MemVT = MN->getMemoryVT();
  unsigned Size = MemVT.getSizeInBits();
  if (MemVT.isScalarInteger()) {
    assert(Size <= 128 &&
           "Not expecting scalar integers larger than 16 bytes!");
    if (Size < 32)
      FlagSet |= PPC::MOF_SubWordInt;
    else if (Size == 32)
      FlagSet |= PPC::MOF_WordInt;
    else
      FlagSet |= PPC::MOF_DoubleWordInt;
  } else if (MemVT.isVector() && !MemVT.isFloatingPoint()) { // Integer vectors.
    if (Size == 128)
      FlagSet |= PPC::MOF_Vector;
    else if (Size == 256) {
      assert(Subtarget.pairedVectorMemops() &&
             "256-bit vectors are only available when paired vector memops "
             "are enabled!");
      FlagSet |= PPC::MOF_Vector;
    } else
      llvm_unreachable("Not expecting illegal vectors!");
  } else { // Floating-point type: can be a scalar, f128, or a vector type.
    if (Size == 32 || Size == 64)
      FlagSet |= PPC::MOF_ScalarFloat;
    else if (MemVT == MVT::f128 || MemVT.isVector())
      FlagSet |= PPC::MOF_Vector;
    else
      llvm_unreachable("Not expecting illegal scalar floats!");
  }

  // Compute flags for the address computation.
  computeFlagsForAddressComputation(N, FlagSet, DAG);

  // Compute type extension flags.
  if (const LoadSDNode *LN = dyn_cast<LoadSDNode>(Parent)) {
    switch (LN->getExtensionType()) {
    case ISD::SEXTLOAD:
      FlagSet |= PPC::MOF_SExt;
      break;
    case ISD::EXTLOAD:
    case ISD::ZEXTLOAD:
      FlagSet |= PPC::MOF_ZExt;
      break;
    case ISD::NON_EXTLOAD:
      FlagSet |= PPC::MOF_NoExt;
      break;
    }
  } else
    FlagSet |= PPC::MOF_NoExt;

  // For integers, no extension is the same as zero extension.
  // We set the extension mode to zero extension so we don't have
  // to add separate entries in AddrModesMap for loads and stores.
  if (MemVT.isScalarInteger() && (FlagSet & PPC::MOF_NoExt)) {
    FlagSet |= PPC::MOF_ZExt;
    FlagSet &= ~PPC::MOF_NoExt;
  }

  // If we don't have prefixed instructions, 34-bit constants should be
  // treated as PPC::MOF_NotAddNorCst so they can match D-Forms.
  bool IsNonP1034BitConst =
      ((PPC::MOF_RPlusSImm34 | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubtargetP10) &
       FlagSet) == PPC::MOF_RPlusSImm34;
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::OR &&
      IsNonP1034BitConst)
    FlagSet |= PPC::MOF_NotAddNorCst;

  return FlagSet;
}
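// Worked example for computeMOFlags above (illustrative): for an i32
// zero-extending load on a Power9 subtarget whose address is
// (add %reg, 20), the computed flags would include MOF_SubtargetP9,
// MOF_WordInt, MOF_ZExt, MOF_RPlusSImm16 and MOF_RPlusSImm16Mult4 (20 is a
// multiple of 4 but not of 16), which getAddrModeForFlags can then match to
// a D-Form load (e.g. lwz) rather than an X-Form load (e.g. lwzx).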
/// SelectForceXFormMode - Given the specified address, force it to be
/// represented as an indexed [r+r] operation (an XForm instruction).
PPC::AddrMode PPCTargetLowering::SelectForceXFormMode(SDValue N, SDValue &Disp,
                                                      SDValue &Base,
                                                      SelectionDAG &DAG) const {

  PPC::AddrMode Mode = PPC::AM_XForm;
  int16_t ForceXFormImm = 0;
  if (provablyDisjointOr(DAG, N) &&
      !isIntS16Immediate(N.getOperand(1), ForceXFormImm)) {
    Disp = N.getOperand(0);
    Base = N.getOperand(1);
    return Mode;
  }

  // If the address is the result of an add, we will utilize the fact that
  // the address calculation includes an implicit add. However, we can reduce
  // register pressure if we do not materialize a constant just for use as
  // the index register. We therefore only fold away the add unless it adds a
  // value and a 16-bit signed constant and both operands have a single use.
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), ForceXFormImm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Disp = N.getOperand(0);
    Base = N.getOperand(1);
    return Mode;
  }

  // Otherwise, use R0 as the base register.
  Disp = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Base = N;

  return Mode;
}
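// Illustrative note on the choice above (editorial): for an address
// (add %reg1, %reg2), the add is folded into the [r+r] form since the
// X-Form instruction performs the addition implicitly. For a hypothetical
// (add %reg, 8) where both operands have a single use, the add is kept (the
// full address is computed separately) and R0, which reads as zero in the
// RA position, supplies the other X-Form operand, avoiding materializing
// the constant 8 into a register just to serve as the index.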
bool PPCTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  EVT ValVT = Val.getValueType();
  // If we are splitting a scalar integer into f64 parts (i.e. so they
  // can be placed into VFRC registers), we need to zero extend and
  // bitcast the values. This will ensure the value is placed into a
  // VSR using direct moves or stack operations as needed.
  if (PartVT == MVT::f64 &&
      (ValVT == MVT::i32 || ValVT == MVT::i16 || ValVT == MVT::i8)) {
    Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Val);
    Parts[0] = Val;
    return true;
  }
  return false;
}

SDValue PPCTargetLowering::lowerToLibCall(const char *LibCallName, SDValue Op,
                                          SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::CallLoweringInfo CLI(DAG);
  EVT RetVT = Op.getValueType();
  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  SDValue Callee =
      DAG.getExternalSymbol(LibCallName, TLI.getPointerTy(DAG.getDataLayout()));
  bool SignExtend = TLI.shouldSignExtendTypeInLibCall(RetVT, false);
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (const SDValue &N : Op->op_values()) {
    EVT ArgVT = N.getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = N;
    Entry.Ty = ArgTy;
    Entry.IsSExt = TLI.shouldSignExtendTypeInLibCall(ArgVT, SignExtend);
    Entry.IsZExt = !Entry.IsSExt;
    Args.push_back(Entry);
  }

  SDValue InChain = DAG.getEntryNode();
  SDValue TCChain = InChain;
  const Function &F = DAG.getMachineFunction().getFunction();
  bool isTailCall =
      TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
      (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());
  if (isTailCall)
    InChain = TCChain;
  CLI.setDebugLoc(SDLoc(Op))
      .setChain(InChain)
      .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args))
      .setTailCall(isTailCall)
      .setSExtResult(SignExtend)
      .setZExtResult(!SignExtend)
      .setIsPostTypeLegalization(true);
  return TLI.LowerCallTo(CLI).first;
}

SDValue PPCTargetLowering::lowerLibCallBasedOnType(
    const char *LibCallFloatName, const char *LibCallDoubleName, SDValue Op,
    SelectionDAG &DAG) const {
  if (Op.getValueType() == MVT::f32)
    return lowerToLibCall(LibCallFloatName, Op, DAG);

  if (Op.getValueType() == MVT::f64)
    return lowerToLibCall(LibCallDoubleName, Op, DAG);

  return SDValue();
}

bool PPCTargetLowering::isLowringToMASSFiniteSafe(SDValue Op) const {
  SDNodeFlags Flags = Op.getNode()->getFlags();
  return isLowringToMASSSafe(Op) && Flags.hasNoSignedZeros() &&
         Flags.hasNoNaNs() && Flags.hasNoInfs();
}

bool PPCTargetLowering::isLowringToMASSSafe(SDValue Op) const {
  return Op.getNode()->getFlags().hasApproximateFuncs();
}

SDValue PPCTargetLowering::lowerLibCallBase(const char *LibCallDoubleName,
                                            const char *LibCallFloatName,
                                            const char *LibCallDoubleNameFinite,
                                            const char *LibCallFloatNameFinite,
                                            SDValue Op,
                                            SelectionDAG &DAG) const {
  if (!isLowringToMASSSafe(Op))
    return SDValue();

  if (!isLowringToMASSFiniteSafe(Op))
    return lowerLibCallBasedOnType(LibCallFloatName, LibCallDoubleName, Op,
                                   DAG);

  return lowerLibCallBasedOnType(LibCallFloatNameFinite,
                                 LibCallDoubleNameFinite, Op, DAG);
}

SDValue PPCTargetLowering::lowerPow(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_pow", "__xl_powf", "__xl_pow_finite",
                          "__xl_powf_finite", Op, DAG);
}
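// Illustrative note on the MASS lowering above (editorial): a call such as
// pow(x, y) compiled so that the afn (approximate-functions) flag is set is
// lowered to the MASS routine __xl_pow, and to __xl_pow_finite when the
// nsz, nnan and ninf flags are additionally all present; without afn,
// lowerLibCallBase returns SDValue() and the ordinary pow libcall is used.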
SDValue PPCTargetLowering::lowerSin(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_sin", "__xl_sinf", "__xl_sin_finite",
                          "__xl_sinf_finite", Op, DAG);
}

SDValue PPCTargetLowering::lowerCos(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_cos", "__xl_cosf", "__xl_cos_finite",
                          "__xl_cosf_finite", Op, DAG);
}

SDValue PPCTargetLowering::lowerLog(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_log", "__xl_logf", "__xl_log_finite",
                          "__xl_logf_finite", Op, DAG);
}

SDValue PPCTargetLowering::lowerLog10(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_log10", "__xl_log10f", "__xl_log10_finite",
                          "__xl_log10f_finite", Op, DAG);
}

SDValue PPCTargetLowering::lowerExp(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_exp", "__xl_expf", "__xl_exp_finite",
                          "__xl_expf_finite", Op, DAG);
}

// If we happen to match to an aligned D-Form, check if the Frame Index is
// adequately aligned. If it is not, reset the mode to match to X-Form.
static void setXFormForUnalignedFI(SDValue N, unsigned Flags,
                                   PPC::AddrMode &Mode) {
  if (!isa<FrameIndexSDNode>(N))
    return;
  if ((Mode == PPC::AM_DSForm && !(Flags & PPC::MOF_RPlusSImm16Mult4)) ||
      (Mode == PPC::AM_DQForm && !(Flags & PPC::MOF_RPlusSImm16Mult16)))
    Mode = PPC::AM_XForm;
}
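// Background for the check above (editorial note): DS-Form instructions
// (e.g. ld, std) encode a displacement that must be a multiple of 4, and
// DQ-Form instructions (e.g. lxv, stxv) one that must be a multiple of 16.
// A frame index whose object alignment does not guarantee this is therefore
// matched with an X-Form ([r+r]) instruction instead.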
/// SelectOptimalAddrMode - Based on a node N and its Parent (a MemSDNode),
/// compute the address flags of the node, get the optimal address mode based
/// on the flags, and set the Base and Disp based on the address mode.
PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
                                                       SDValue N, SDValue &Disp,
                                                       SDValue &Base,
                                                       SelectionDAG &DAG,
                                                       MaybeAlign Align) const {
  SDLoc DL(Parent);

  // Compute the address flags.
  unsigned Flags = computeMOFlags(Parent, N, DAG);

  // Get the optimal address mode based on the Flags.
  PPC::AddrMode Mode = getAddrModeForFlags(Flags);

  // If the address mode is DS-Form or DQ-Form, check if the FI is aligned.
  // Select an X-Form load if it is not.
  setXFormForUnalignedFI(N, Flags, Mode);

  // Set the mode to PC-Relative addressing mode if we have a valid PC-Rel
  // node.
  if ((Mode == PPC::AM_XForm) && isPCRelNode(N)) {
    assert(Subtarget.isUsingPCRelativeCalls() &&
           "Must be using PC-Relative calls when a valid PC-Relative node is "
           "present!");
    Mode = PPC::AM_PCRel;
  }

  // Set Base and Disp accordingly depending on the address mode.
  switch (Mode) {
  case PPC::AM_DForm:
  case PPC::AM_DSForm:
  case PPC::AM_DQForm: {
    // This is a register plus a 16-bit immediate. The base will be the
    // register and the displacement will be the immediate unless it
    // isn't sufficiently aligned.
    if (Flags & PPC::MOF_RPlusSImm16) {
      SDValue Op0 = N.getOperand(0);
      SDValue Op1 = N.getOperand(1);
      int16_t Imm = cast<ConstantSDNode>(Op1)->getAPIntValue().getZExtValue();
      if (!Align || isAligned(*Align, Imm)) {
        Disp = DAG.getTargetConstant(Imm, DL, N.getValueType());
        Base = Op0;
        if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op0)) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        }
        break;
      }
    }
    // This is a register plus the @lo relocation. The base is the register
    // and the displacement is the global address.
    else if (Flags & PPC::MOF_RPlusLo) {
      Disp = N.getOperand(1).getOperand(0); // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      break;
    }
    // This is a constant address of at most 32 bits. The base will be
    // zero or load-immediate-shifted and the displacement will be
    // the low 16 bits of the address.
    else if (Flags & PPC::MOF_AddrIsSImm32) {
      auto *CN = cast<ConstantSDNode>(N);
      EVT CNType = CN->getValueType(0);
      uint64_t CNImm = CN->getZExtValue();
      // If this address fits entirely in a 16-bit sext immediate field,
      // codegen this as "d, 0".
      int16_t Imm;
      if (isIntS16Immediate(CN, Imm) && (!Align || isAligned(*Align, Imm))) {
        Disp = DAG.getTargetConstant(Imm, DL, CNType);
        Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                               CNType);
        break;
      }
      // Handle 32-bit sext immediates with LIS + Addr mode.
      if ((CNType == MVT::i32 || isInt<32>(CNImm)) &&
          (!Align || isAligned(*Align, CNImm))) {
        int32_t Addr = (int32_t)CNImm;
        // Otherwise, break this down into LIS + Disp.
        Disp = DAG.getTargetConstant((int16_t)Addr, DL, MVT::i32);
        Base =
            DAG.getTargetConstant((Addr - (int16_t)Addr) >> 16, DL, MVT::i32);
        uint32_t LIS = CNType == MVT::i32 ? PPC::LIS : PPC::LIS8;
        Base = SDValue(DAG.getMachineNode(LIS, DL, CNType, Base), 0);
        break;
      }
    }
    // Otherwise, the PPC::MOF_NotAddNorCst flag is set. The load/store is
    // not foldable.
    Disp = DAG.getTargetConstant(0, DL, getPointerTy(DAG.getDataLayout()));
    if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
      Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
    } else
      Base = N;
    break;
  }
  case PPC::AM_PrefixDForm: {
    int64_t Imm34 = 0;
    unsigned Opcode = N.getOpcode();
    if (((Opcode == ISD::ADD) || (Opcode == ISD::OR)) &&
        (isIntS34Immediate(N.getOperand(1), Imm34))) {
      // N is an ADD/OR node, and its operand is a 34-bit signed immediate.
      Disp = DAG.getTargetConstant(Imm34, DL, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      else
        Base = N.getOperand(0);
    } else if (isIntS34Immediate(N, Imm34)) {
      // The address is a 34-bit signed immediate.
      Disp = DAG.getTargetConstant(Imm34, DL, N.getValueType());
      Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
    }
    break;
  }
  case PPC::AM_PCRel: {
    // When selecting PC-Relative instructions, "Base" is not utilized as
    // we select the address as [PC+imm].
    Disp = N;
    break;
  }
  case PPC::AM_None:
    break;
  default: { // By default, X-Form is always available to be selected.
    // When a frame index is not aligned, we also match by X-Form.
    FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N);
    Base = FI ? N : N.getOperand(1);
    Disp = FI ? DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                                N.getValueType())
              : N.getOperand(0);
    break;
  }
  }
  return Mode;
}
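// Worked example for the LIS + Disp split above (illustrative value): for a
// constant address 0x12348000, (int16_t)Addr is -0x8000, so Disp becomes
// -0x8000 and Base becomes (0x12348000 - (-0x8000)) >> 16 == 0x1235; the
// materialized LIS produces 0x12350000, and 0x12350000 + (-0x8000) restores
// the original 0x12348000.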
CCAssignFn *PPCTargetLowering::ccAssignFnForCall(CallingConv::ID CC,
                                                 bool Return,
                                                 bool IsVarArg) const {
  switch (CC) {
  case CallingConv::Cold:
    return (Return ? RetCC_PPC_Cold : CC_PPC64_ELF_FIS);
  default:
    return CC_PPC64_ELF_FIS;
  }
}

bool PPCTargetLowering::shouldInlineQuadwordAtomics() const {
  // TODO: 16-byte atomic type support for AIX is in progress; we should be
  // able to inline 16-byte atomic ops on AIX too in the future.
  return Subtarget.isPPC64() &&
         (EnableQuadwordAtomics || !Subtarget.getTargetTriple().isOSAIX()) &&
         Subtarget.hasQuadwordAtomics();
}

TargetLowering::AtomicExpansionKind
PPCTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (shouldInlineQuadwordAtomics() && Size == 128)
    return AtomicExpansionKind::MaskedIntrinsic;
  return TargetLowering::shouldExpandAtomicRMWInIR(AI);
}

TargetLowering::AtomicExpansionKind
PPCTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
  unsigned Size = AI->getNewValOperand()->getType()->getPrimitiveSizeInBits();
  if (shouldInlineQuadwordAtomics() && Size == 128)
    return AtomicExpansionKind::MaskedIntrinsic;
  return TargetLowering::shouldExpandAtomicCmpXchgInIR(AI);
}

static Intrinsic::ID
getIntrinsicForAtomicRMWBinOp128(AtomicRMWInst::BinOp BinOp) {
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Xchg:
    return Intrinsic::ppc_atomicrmw_xchg_i128;
  case AtomicRMWInst::Add:
    return Intrinsic::ppc_atomicrmw_add_i128;
  case AtomicRMWInst::Sub:
    return Intrinsic::ppc_atomicrmw_sub_i128;
  case AtomicRMWInst::And:
    return Intrinsic::ppc_atomicrmw_and_i128;
  case AtomicRMWInst::Or:
    return Intrinsic::ppc_atomicrmw_or_i128;
  case AtomicRMWInst::Xor:
    return Intrinsic::ppc_atomicrmw_xor_i128;
  case AtomicRMWInst::Nand:
    return Intrinsic::ppc_atomicrmw_nand_i128;
  }
}
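// Sketch of the expansion performed below (illustrative IR, editorial note):
// an instruction such as
//   %old = atomicrmw add i128* %p, i128 %v seq_cst
// is rewritten so that %v is split into two i64 halves which are passed to
// @llvm.ppc.atomicrmw.add.i128; the {i64, i64} result is then zero-extended
// and recombined into an i128 as lo | (hi << 64).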
Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  assert(shouldInlineQuadwordAtomics() &&
         "Only quadword atomics are supported for now");
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = Incr->getType();
  assert(ValTy->getPrimitiveSizeInBits() == 128);
  Function *RMW = Intrinsic::getDeclaration(
      M, getIntrinsicForAtomicRMWBinOp128(AI->getOperation()));
  Type *Int64Ty = Type::getInt64Ty(M->getContext());
  Value *IncrLo = Builder.CreateTrunc(Incr, Int64Ty, "incr_lo");
  Value *IncrHi =
      Builder.CreateTrunc(Builder.CreateLShr(Incr, 64), Int64Ty, "incr_hi");
  Value *Addr =
      Builder.CreateBitCast(AlignedAddr, Type::getInt8PtrTy(M->getContext()));
  Value *LoHi = Builder.CreateCall(RMW, {Addr, IncrLo, IncrHi});
  Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
  Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
  Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
  Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
  return Builder.CreateOr(
      Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}

Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  assert(shouldInlineQuadwordAtomics() &&
         "Only quadword atomics are supported for now");
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = CmpVal->getType();
  assert(ValTy->getPrimitiveSizeInBits() == 128);
  Function *IntCmpXchg =
      Intrinsic::getDeclaration(M, Intrinsic::ppc_cmpxchg_i128);
  Type *Int64Ty = Type::getInt64Ty(M->getContext());
  Value *CmpLo = Builder.CreateTrunc(CmpVal, Int64Ty, "cmp_lo");
  Value *CmpHi =
      Builder.CreateTrunc(Builder.CreateLShr(CmpVal, 64), Int64Ty, "cmp_hi");
  Value *NewLo = Builder.CreateTrunc(NewVal, Int64Ty, "new_lo");
  Value *NewHi =
      Builder.CreateTrunc(Builder.CreateLShr(NewVal, 64), Int64Ty, "new_hi");
  Value *Addr =
      Builder.CreateBitCast(AlignedAddr, Type::getInt8PtrTy(M->getContext()));
  emitLeadingFence(Builder, CI, Ord);
  Value *LoHi =
      Builder.CreateCall(IntCmpXchg, {Addr, CmpLo, CmpHi, NewLo, NewHi});
  emitTrailingFence(Builder, CI, Ord);
  Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
  Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
  Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
  Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
  return Builder.CreateOr(
      Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}
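// Note on the cmpxchg expansion above (editorial): both the expected and the
// new i128 values are split into i64 halves for @llvm.ppc.cmpxchg.i128, and
// the call is bracketed by emitLeadingFence/emitTrailingFence so that the
// memory ordering requested by the original cmpxchg instruction is preserved.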