1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// Custom DAG lowering for SI 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "SIISelLowering.h" 15 #include "AMDGPU.h" 16 #include "AMDGPUInstrInfo.h" 17 #include "AMDGPUTargetMachine.h" 18 #include "SIMachineFunctionInfo.h" 19 #include "SIRegisterInfo.h" 20 #include "llvm/ADT/FloatingPointMode.h" 21 #include "llvm/ADT/Statistic.h" 22 #include "llvm/Analysis/LegacyDivergenceAnalysis.h" 23 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 24 #include "llvm/BinaryFormat/ELF.h" 25 #include "llvm/CodeGen/Analysis.h" 26 #include "llvm/CodeGen/FunctionLoweringInfo.h" 27 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" 28 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" 29 #include "llvm/CodeGen/MachineFrameInfo.h" 30 #include "llvm/CodeGen/MachineFunction.h" 31 #include "llvm/CodeGen/MachineLoopInfo.h" 32 #include "llvm/IR/DiagnosticInfo.h" 33 #include "llvm/IR/IntrinsicInst.h" 34 #include "llvm/IR/IntrinsicsAMDGPU.h" 35 #include "llvm/IR/IntrinsicsR600.h" 36 #include "llvm/Support/CommandLine.h" 37 #include "llvm/Support/KnownBits.h" 38 39 using namespace llvm; 40 41 #define DEBUG_TYPE "si-lower" 42 43 STATISTIC(NumTailCalls, "Number of tail calls"); 44 45 static cl::opt<bool> DisableLoopAlignment( 46 "amdgpu-disable-loop-alignment", 47 cl::desc("Do not align and prefetch loops"), 48 cl::init(false)); 49 50 static cl::opt<bool> UseDivergentRegisterIndexing( 51 "amdgpu-use-divergent-register-indexing", 52 cl::Hidden, 53 cl::desc("Use indirect register addressing for divergent indexes"), 54 cl::init(false)); 55 56 static bool hasFP32Denormals(const MachineFunction &MF) { 57 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 58 return Info->getMode().allFP32Denormals(); 59 } 60 61 static bool hasFP64FP16Denormals(const MachineFunction &MF) { 62 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 63 return Info->getMode().allFP64FP16Denormals(); 64 } 65 66 static unsigned findFirstFreeSGPR(CCState &CCInfo) { 67 unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); 68 for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) { 69 if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) { 70 return AMDGPU::SGPR0 + Reg; 71 } 72 } 73 llvm_unreachable("Cannot allocate sgpr"); 74 } 75 76 SITargetLowering::SITargetLowering(const TargetMachine &TM, 77 const GCNSubtarget &STI) 78 : AMDGPUTargetLowering(TM, STI), 79 Subtarget(&STI) { 80 addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass); 81 addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); 82 83 addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass); 84 addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass); 85 86 addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass); 87 88 const SIRegisterInfo *TRI = STI.getRegisterInfo(); 89 const TargetRegisterClass *V64RegClass = TRI->getVGPR64Class(); 90 91 addRegisterClass(MVT::f64, V64RegClass); 92 addRegisterClass(MVT::v2f32, V64RegClass); 93 94 addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass); 95 addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96)); 96 97 addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass); 98 
  addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
  addRegisterClass(MVT::v4f32, TRI->getVGPRClassForBitWidth(128));

  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
  addRegisterClass(MVT::v5f32, TRI->getVGPRClassForBitWidth(160));

  addRegisterClass(MVT::v6i32, &AMDGPU::SGPR_192RegClass);
  addRegisterClass(MVT::v6f32, TRI->getVGPRClassForBitWidth(192));

  addRegisterClass(MVT::v3i64, &AMDGPU::SGPR_192RegClass);
  addRegisterClass(MVT::v3f64, TRI->getVGPRClassForBitWidth(192));

  addRegisterClass(MVT::v7i32, &AMDGPU::SGPR_224RegClass);
  addRegisterClass(MVT::v7f32, TRI->getVGPRClassForBitWidth(224));

  addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass);
  addRegisterClass(MVT::v8f32, TRI->getVGPRClassForBitWidth(256));

  addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass);
  addRegisterClass(MVT::v4f64, TRI->getVGPRClassForBitWidth(256));

  addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass);
  addRegisterClass(MVT::v16f32, TRI->getVGPRClassForBitWidth(512));

  addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass);
  addRegisterClass(MVT::v8f64, TRI->getVGPRClassForBitWidth(512));

  addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass);
  addRegisterClass(MVT::v16f64, TRI->getVGPRClassForBitWidth(1024));

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass);

    // Unless there are also VOP3P operations, none of these operations are
    // really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v8i16, &AMDGPU::SGPR_128RegClass);
    addRegisterClass(MVT::v8f16, &AMDGPU::SGPR_128RegClass);
    addRegisterClass(MVT::v16i16, &AMDGPU::SGPR_256RegClass);
    addRegisterClass(MVT::v16f16, &AMDGPU::SGPR_256RegClass);
  }

  addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
  addRegisterClass(MVT::v32f32, TRI->getVGPRClassForBitWidth(1024));

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // The boolean content concept here is too inflexible. Compares only ever
  // really produce a 1-bit result. Any copy/extend from these will turn into a
  // select, and zext/1 or sext/-1 are equally cheap. Arbitrarily choose 0/1, as
  // it's what most targets use.
154 setBooleanContents(ZeroOrOneBooleanContent); 155 setBooleanVectorContents(ZeroOrOneBooleanContent); 156 157 // We need to custom lower vector stores from local memory 158 setOperationAction(ISD::LOAD, 159 {MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, 160 MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32, MVT::i1, 161 MVT::v32i32}, 162 Custom); 163 164 setOperationAction(ISD::STORE, 165 {MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, 166 MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32, MVT::i1, 167 MVT::v32i32}, 168 Custom); 169 170 setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); 171 setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand); 172 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand); 173 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand); 174 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand); 175 setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand); 176 setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand); 177 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand); 178 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand); 179 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand); 180 setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand); 181 setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand); 182 setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand); 183 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand); 184 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Expand); 185 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Expand); 186 187 setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand); 188 setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand); 189 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Expand); 190 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Expand); 191 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Expand); 192 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Expand); 193 setTruncStoreAction(MVT::v16i64, MVT::v16i32, Expand); 194 195 setOperationAction(ISD::GlobalAddress, {MVT::i32, MVT::i64}, Custom); 196 197 setOperationAction(ISD::SELECT, MVT::i1, Promote); 198 setOperationAction(ISD::SELECT, MVT::i64, Custom); 199 setOperationAction(ISD::SELECT, MVT::f64, Promote); 200 AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); 201 202 setOperationAction(ISD::SELECT_CC, 203 {MVT::f32, MVT::i32, MVT::i64, MVT::f64, MVT::i1}, Expand); 204 205 setOperationAction(ISD::SETCC, MVT::i1, Promote); 206 setOperationAction(ISD::SETCC, {MVT::v2i1, MVT::v4i1}, Expand); 207 AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); 208 209 setOperationAction(ISD::TRUNCATE, 210 {MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, 211 MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32}, 212 Expand); 213 setOperationAction(ISD::FP_ROUND, 214 {MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32, 215 MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32}, 216 Expand); 217 218 setOperationAction(ISD::SIGN_EXTEND_INREG, 219 {MVT::v2i1, MVT::v4i1, MVT::v2i8, MVT::v4i8, MVT::v2i16, 220 MVT::v3i16, MVT::v4i16, MVT::Other}, 221 Custom); 222 223 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 224 setOperationAction(ISD::BR_CC, 225 {MVT::i1, MVT::i32, MVT::i64, MVT::f32, MVT::f64}, Expand); 226 227 setOperationAction({ISD::UADDO, ISD::USUBO}, MVT::i32, Legal); 228 229 setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY}, MVT::i32, Legal); 230 231 setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, MVT::i64, 232 Expand); 233 234 #if 0 235 setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY}, MVT::i64, Legal); 236 #endif 237 238 // We only support LOAD/STORE and vector manipulation ops for vectors 239 // with > 4 elements. 
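  // In the loop below, the listed load/store and vector-manipulation ops keep
  // their existing action, INSERT_SUBVECTOR and CONCAT_VECTORS are marked
  // Custom, and everything else on these wide types is Expanded.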
240 for (MVT VT : 241 {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, MVT::v2i64, 242 MVT::v2f64, MVT::v4i16, MVT::v4f16, MVT::v3i64, MVT::v3f64, 243 MVT::v6i32, MVT::v6f32, MVT::v4i64, MVT::v4f64, MVT::v8i64, 244 MVT::v8f64, MVT::v8i16, MVT::v8f16, MVT::v16i16, MVT::v16f16, 245 MVT::v16i64, MVT::v16f64, MVT::v32i32, MVT::v32f32}) { 246 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 247 switch (Op) { 248 case ISD::LOAD: 249 case ISD::STORE: 250 case ISD::BUILD_VECTOR: 251 case ISD::BITCAST: 252 case ISD::EXTRACT_VECTOR_ELT: 253 case ISD::INSERT_VECTOR_ELT: 254 case ISD::EXTRACT_SUBVECTOR: 255 case ISD::SCALAR_TO_VECTOR: 256 break; 257 case ISD::INSERT_SUBVECTOR: 258 case ISD::CONCAT_VECTORS: 259 setOperationAction(Op, VT, Custom); 260 break; 261 default: 262 setOperationAction(Op, VT, Expand); 263 break; 264 } 265 } 266 } 267 268 setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand); 269 270 // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that 271 // is expanded to avoid having two separate loops in case the index is a VGPR. 272 273 // Most operations are naturally 32-bit vector operations. We only support 274 // load and store of i64 vectors, so promote v2i64 vector operations to v4i32. 275 for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) { 276 setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); 277 AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32); 278 279 setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); 280 AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32); 281 282 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); 283 AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32); 284 285 setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); 286 AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32); 287 } 288 289 for (MVT Vec64 : { MVT::v3i64, MVT::v3f64 }) { 290 setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); 291 AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v6i32); 292 293 setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); 294 AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v6i32); 295 296 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); 297 AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v6i32); 298 299 setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); 300 AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v6i32); 301 } 302 303 for (MVT Vec64 : { MVT::v4i64, MVT::v4f64 }) { 304 setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); 305 AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v8i32); 306 307 setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); 308 AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v8i32); 309 310 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); 311 AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v8i32); 312 313 setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); 314 AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v8i32); 315 } 316 317 for (MVT Vec64 : { MVT::v8i64, MVT::v8f64 }) { 318 setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); 319 AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v16i32); 320 321 setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); 322 AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v16i32); 323 324 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); 325 AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v16i32); 326 327 setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); 328 AddPromotedToType(ISD::SCALAR_TO_VECTOR, 
Vec64, MVT::v16i32); 329 } 330 331 for (MVT Vec64 : { MVT::v16i64, MVT::v16f64 }) { 332 setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); 333 AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v32i32); 334 335 setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); 336 AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v32i32); 337 338 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); 339 AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v32i32); 340 341 setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); 342 AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v32i32); 343 } 344 345 setOperationAction(ISD::VECTOR_SHUFFLE, 346 {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32}, 347 Expand); 348 349 setOperationAction(ISD::BUILD_VECTOR, {MVT::v4f16, MVT::v4i16}, Custom); 350 351 // Avoid stack access for these. 352 // TODO: Generalize to more vector types. 353 setOperationAction({ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}, 354 {MVT::v2i16, MVT::v2f16, MVT::v2i8, MVT::v4i8, MVT::v8i8, 355 MVT::v4i16, MVT::v4f16, MVT::v16i16, MVT::v16f16}, 356 Custom); 357 358 // Deal with vec3 vector operations when widened to vec4. 359 setOperationAction(ISD::INSERT_SUBVECTOR, 360 {MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32}, Custom); 361 362 // Deal with vec5/6/7 vector operations when widened to vec8. 363 setOperationAction(ISD::INSERT_SUBVECTOR, 364 {MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32, 365 MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32}, 366 Custom); 367 368 // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, 369 // and output demarshalling 370 setOperationAction(ISD::ATOMIC_CMP_SWAP, {MVT::i32, MVT::i64}, Custom); 371 372 // We can't return success/failure, only the old value, 373 // let LLVM add the comparison 374 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, {MVT::i32, MVT::i64}, 375 Expand); 376 377 if (Subtarget->hasFlatAddressSpace()) 378 setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom); 379 380 setOperationAction(ISD::BITREVERSE, {MVT::i32, MVT::i64}, Legal); 381 382 // FIXME: This should be narrowed to i32, but that only happens if i64 is 383 // illegal. 384 // FIXME: Should lower sub-i32 bswaps to bit-ops without v_perm_b32. 385 setOperationAction(ISD::BSWAP, {MVT::i64, MVT::i32}, Legal); 386 387 // On SI this is s_memtime and s_memrealtime on VI. 388 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); 389 setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Custom); 390 391 if (Subtarget->has16BitInsts()) { 392 setOperationAction({ISD::FPOW, ISD::FPOWI}, MVT::f16, Promote); 393 setOperationAction({ISD::FLOG, ISD::FEXP, ISD::FLOG10}, MVT::f16, Custom); 394 } 395 396 if (Subtarget->hasMadMacF32Insts()) 397 setOperationAction(ISD::FMAD, MVT::f32, Legal); 398 399 if (!Subtarget->hasBFI()) 400 // fcopysign can be done in a single instruction with BFI. 401 setOperationAction(ISD::FCOPYSIGN, {MVT::f32, MVT::f64}, Expand); 402 403 if (!Subtarget->hasBCNT(32)) 404 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 405 406 if (!Subtarget->hasBCNT(64)) 407 setOperationAction(ISD::CTPOP, MVT::i64, Expand); 408 409 if (Subtarget->hasFFBH()) 410 setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom); 411 412 if (Subtarget->hasFFBL()) 413 setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom); 414 415 // We only really have 32-bit BFE instructions (and 16-bit on VI). 
416 // 417 // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any 418 // effort to match them now. We want this to be false for i64 cases when the 419 // extraction isn't restricted to the upper or lower half. Ideally we would 420 // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that 421 // span the midpoint are probably relatively rare, so don't worry about them 422 // for now. 423 if (Subtarget->hasBFE()) 424 setHasExtractBitsInsn(true); 425 426 // Clamp modifier on add/sub 427 if (Subtarget->hasIntClamp()) 428 setOperationAction({ISD::UADDSAT, ISD::USUBSAT}, MVT::i32, Legal); 429 430 if (Subtarget->hasAddNoCarry()) 431 setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, {MVT::i16, MVT::i32}, 432 Legal); 433 434 setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, {MVT::f32, MVT::f64}, 435 Custom); 436 437 // These are really only legal for ieee_mode functions. We should be avoiding 438 // them for functions that don't have ieee_mode enabled, so just say they are 439 // legal. 440 setOperationAction({ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE}, 441 {MVT::f32, MVT::f64}, Legal); 442 443 if (Subtarget->haveRoundOpsF64()) 444 setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FRINT}, MVT::f64, Legal); 445 else 446 setOperationAction({ISD::FCEIL, ISD::FTRUNC, ISD::FRINT, ISD::FFLOOR}, 447 MVT::f64, Custom); 448 449 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 450 451 setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FDIV}, MVT::f32, Custom); 452 setOperationAction(ISD::FDIV, MVT::f64, Custom); 453 454 if (Subtarget->has16BitInsts()) { 455 setOperationAction({ISD::Constant, ISD::SMIN, ISD::SMAX, ISD::UMIN, 456 ISD::UMAX, ISD::UADDSAT, ISD::USUBSAT}, 457 MVT::i16, Legal); 458 459 AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32); 460 461 setOperationAction({ISD::ROTR, ISD::ROTL, ISD::SELECT_CC, ISD::BR_CC}, 462 MVT::i16, Expand); 463 464 setOperationAction({ISD::SIGN_EXTEND, ISD::SDIV, ISD::UDIV, ISD::SREM, 465 ISD::UREM, ISD::BITREVERSE, ISD::CTTZ, 466 ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF, 467 ISD::CTPOP}, 468 MVT::i16, Promote); 469 470 setOperationAction(ISD::LOAD, MVT::i16, Custom); 471 472 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 473 474 setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote); 475 AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32); 476 setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote); 477 AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32); 478 479 setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT}, MVT::i16, Custom); 480 481 // F16 - Constant Actions. 482 setOperationAction(ISD::ConstantFP, MVT::f16, Legal); 483 484 // F16 - Load/Store Actions. 485 setOperationAction(ISD::LOAD, MVT::f16, Promote); 486 AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16); 487 setOperationAction(ISD::STORE, MVT::f16, Promote); 488 AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16); 489 490 // F16 - VOP1 Actions. 491 setOperationAction( 492 {ISD::FP_ROUND, ISD::FCOS, ISD::FSIN, ISD::FROUND, ISD::FPTRUNC_ROUND}, 493 MVT::f16, Custom); 494 495 setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP}, MVT::i16, Custom); 496 497 setOperationAction( 498 {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP}, 499 MVT::f16, Promote); 500 501 // F16 - VOP2 Actions. 502 setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, MVT::f16, Expand); 503 504 setOperationAction(ISD::FDIV, MVT::f16, Custom); 505 506 // F16 - VOP3 Actions. 
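    // f16 FMA is always selectable here; FMAD additionally requires the
    // subtarget's mad-f16 support, which is checked just below.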
507 setOperationAction(ISD::FMA, MVT::f16, Legal); 508 if (STI.hasMadF16()) 509 setOperationAction(ISD::FMAD, MVT::f16, Legal); 510 511 for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16, MVT::v8i16, 512 MVT::v8f16, MVT::v16i16, MVT::v16f16}) { 513 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 514 switch (Op) { 515 case ISD::LOAD: 516 case ISD::STORE: 517 case ISD::BUILD_VECTOR: 518 case ISD::BITCAST: 519 case ISD::EXTRACT_VECTOR_ELT: 520 case ISD::INSERT_VECTOR_ELT: 521 case ISD::INSERT_SUBVECTOR: 522 case ISD::EXTRACT_SUBVECTOR: 523 case ISD::SCALAR_TO_VECTOR: 524 break; 525 case ISD::CONCAT_VECTORS: 526 setOperationAction(Op, VT, Custom); 527 break; 528 default: 529 setOperationAction(Op, VT, Expand); 530 break; 531 } 532 } 533 } 534 535 // v_perm_b32 can handle either of these. 536 setOperationAction(ISD::BSWAP, {MVT::i16, MVT::v2i16}, Legal); 537 setOperationAction(ISD::BSWAP, MVT::v4i16, Custom); 538 539 // XXX - Do these do anything? Vector constants turn into build_vector. 540 setOperationAction(ISD::Constant, {MVT::v2i16, MVT::v2f16}, Legal); 541 542 setOperationAction(ISD::UNDEF, {MVT::v2i16, MVT::v2f16}, Legal); 543 544 setOperationAction(ISD::STORE, MVT::v2i16, Promote); 545 AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32); 546 setOperationAction(ISD::STORE, MVT::v2f16, Promote); 547 AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32); 548 549 setOperationAction(ISD::LOAD, MVT::v2i16, Promote); 550 AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32); 551 setOperationAction(ISD::LOAD, MVT::v2f16, Promote); 552 AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32); 553 554 setOperationAction(ISD::AND, MVT::v2i16, Promote); 555 AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32); 556 setOperationAction(ISD::OR, MVT::v2i16, Promote); 557 AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32); 558 setOperationAction(ISD::XOR, MVT::v2i16, Promote); 559 AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32); 560 561 setOperationAction(ISD::LOAD, MVT::v4i16, Promote); 562 AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32); 563 setOperationAction(ISD::LOAD, MVT::v4f16, Promote); 564 AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32); 565 566 setOperationAction(ISD::STORE, MVT::v4i16, Promote); 567 AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32); 568 setOperationAction(ISD::STORE, MVT::v4f16, Promote); 569 AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32); 570 571 setOperationAction(ISD::LOAD, MVT::v8i16, Promote); 572 AddPromotedToType(ISD::LOAD, MVT::v8i16, MVT::v4i32); 573 setOperationAction(ISD::LOAD, MVT::v8f16, Promote); 574 AddPromotedToType(ISD::LOAD, MVT::v8f16, MVT::v4i32); 575 576 setOperationAction(ISD::STORE, MVT::v4i16, Promote); 577 AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32); 578 setOperationAction(ISD::STORE, MVT::v4f16, Promote); 579 AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32); 580 581 setOperationAction(ISD::STORE, MVT::v8i16, Promote); 582 AddPromotedToType(ISD::STORE, MVT::v8i16, MVT::v4i32); 583 setOperationAction(ISD::STORE, MVT::v8f16, Promote); 584 AddPromotedToType(ISD::STORE, MVT::v8f16, MVT::v4i32); 585 586 setOperationAction(ISD::LOAD, MVT::v16i16, Promote); 587 AddPromotedToType(ISD::LOAD, MVT::v16i16, MVT::v8i32); 588 setOperationAction(ISD::LOAD, MVT::v16f16, Promote); 589 AddPromotedToType(ISD::LOAD, MVT::v16f16, MVT::v8i32); 590 591 setOperationAction(ISD::STORE, MVT::v16i16, Promote); 592 AddPromotedToType(ISD::STORE, MVT::v16i16, MVT::v8i32); 593 setOperationAction(ISD::STORE, MVT::v16f16, Promote); 594 
AddPromotedToType(ISD::STORE, MVT::v16f16, MVT::v8i32); 595 596 setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND}, 597 MVT::v2i32, Expand); 598 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand); 599 600 setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND}, 601 MVT::v4i32, Expand); 602 603 setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND}, 604 MVT::v8i32, Expand); 605 606 if (!Subtarget->hasVOP3PInsts()) 607 setOperationAction(ISD::BUILD_VECTOR, {MVT::v2i16, MVT::v2f16}, Custom); 608 609 setOperationAction(ISD::FNEG, MVT::v2f16, Legal); 610 // This isn't really legal, but this avoids the legalizer unrolling it (and 611 // allows matching fneg (fabs x) patterns) 612 setOperationAction(ISD::FABS, MVT::v2f16, Legal); 613 614 setOperationAction({ISD::FMAXNUM, ISD::FMINNUM}, MVT::f16, Custom); 615 setOperationAction({ISD::FMAXNUM_IEEE, ISD::FMINNUM_IEEE}, MVT::f16, Legal); 616 617 setOperationAction({ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE}, 618 {MVT::v4f16, MVT::v8f16, MVT::v16f16}, Custom); 619 620 setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, 621 {MVT::v4f16, MVT::v8f16, MVT::v16f16}, Expand); 622 623 for (MVT Vec16 : {MVT::v8i16, MVT::v8f16, MVT::v16i16, MVT::v16f16}) { 624 setOperationAction( 625 {ISD::BUILD_VECTOR, ISD::EXTRACT_VECTOR_ELT, ISD::SCALAR_TO_VECTOR}, 626 Vec16, Custom); 627 setOperationAction(ISD::INSERT_VECTOR_ELT, Vec16, Expand); 628 } 629 } 630 631 if (Subtarget->hasVOP3PInsts()) { 632 setOperationAction({ISD::ADD, ISD::SUB, ISD::MUL, ISD::SHL, ISD::SRL, 633 ISD::SRA, ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX, 634 ISD::UADDSAT, ISD::USUBSAT, ISD::SADDSAT, ISD::SSUBSAT}, 635 MVT::v2i16, Legal); 636 637 setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FMINNUM_IEEE, 638 ISD::FMAXNUM_IEEE, ISD::FCANONICALIZE}, 639 MVT::v2f16, Legal); 640 641 setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2i16, MVT::v2f16}, 642 Custom); 643 644 setOperationAction(ISD::VECTOR_SHUFFLE, 645 {MVT::v4f16, MVT::v4i16, MVT::v8f16, MVT::v8i16, 646 MVT::v16f16, MVT::v16i16}, 647 Custom); 648 649 for (MVT VT : {MVT::v4i16, MVT::v8i16, MVT::v16i16}) 650 // Split vector operations. 651 setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL, ISD::ADD, ISD::SUB, 652 ISD::MUL, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, 653 ISD::UADDSAT, ISD::SADDSAT, ISD::USUBSAT, 654 ISD::SSUBSAT}, 655 VT, Custom); 656 657 for (MVT VT : {MVT::v4f16, MVT::v8f16, MVT::v16f16}) 658 // Split vector operations. 659 setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FCANONICALIZE}, 660 VT, Custom); 661 662 setOperationAction({ISD::FMAXNUM, ISD::FMINNUM}, {MVT::v2f16, MVT::v4f16}, 663 Custom); 664 665 setOperationAction(ISD::FEXP, MVT::v2f16, Custom); 666 setOperationAction(ISD::SELECT, {MVT::v4i16, MVT::v4f16}, Custom); 667 668 if (Subtarget->hasPackedFP32Ops()) { 669 setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FNEG}, 670 MVT::v2f32, Legal); 671 setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA}, 672 {MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32}, 673 Custom); 674 } 675 } 676 677 setOperationAction({ISD::FNEG, ISD::FABS}, MVT::v4f16, Custom); 678 679 if (Subtarget->has16BitInsts()) { 680 setOperationAction(ISD::SELECT, MVT::v2i16, Promote); 681 AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32); 682 setOperationAction(ISD::SELECT, MVT::v2f16, Promote); 683 AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32); 684 } else { 685 // Legalization hack. 
    setOperationAction(ISD::SELECT, {MVT::v2i16, MVT::v2f16}, Custom);

    setOperationAction({ISD::FNEG, ISD::FABS}, MVT::v2f16, Custom);
  }

  setOperationAction(ISD::SELECT,
                     {MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8,
                      MVT::v8i16, MVT::v8f16, MVT::v16i16, MVT::v16f16},
                     Custom);

  setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::i64, Custom);

  if (Subtarget->hasMad64_32())
    setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN,
                     {MVT::Other, MVT::f32, MVT::v4f32, MVT::i16, MVT::f16,
                      MVT::v2i16, MVT::v2f16},
                     Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN,
                     {MVT::v2f16, MVT::v2i16, MVT::v3f16, MVT::v3i16,
                      MVT::v4f16, MVT::v4i16, MVT::v8f16, MVT::Other, MVT::f16,
                      MVT::i16, MVT::i8},
                     Custom);

  setOperationAction(ISD::INTRINSIC_VOID,
                     {MVT::Other, MVT::v2i16, MVT::v2f16, MVT::v3i16,
                      MVT::v3f16, MVT::v4f16, MVT::v4i16, MVT::f16, MVT::i16,
                      MVT::i8},
                     Custom);

  setTargetDAGCombine({ISD::ADD,
                       ISD::ADDCARRY,
                       ISD::SUB,
                       ISD::SUBCARRY,
                       ISD::FADD,
                       ISD::FSUB,
                       ISD::FMINNUM,
                       ISD::FMAXNUM,
                       ISD::FMINNUM_IEEE,
                       ISD::FMAXNUM_IEEE,
                       ISD::FMA,
                       ISD::SMIN,
                       ISD::SMAX,
                       ISD::UMIN,
                       ISD::UMAX,
                       ISD::SETCC,
                       ISD::AND,
                       ISD::OR,
                       ISD::XOR,
                       ISD::SINT_TO_FP,
                       ISD::UINT_TO_FP,
                       ISD::FCANONICALIZE,
                       ISD::SCALAR_TO_VECTOR,
                       ISD::ZERO_EXTEND,
                       ISD::SIGN_EXTEND_INREG,
                       ISD::EXTRACT_VECTOR_ELT,
                       ISD::INSERT_VECTOR_ELT});

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine({ISD::LOAD,
                       ISD::STORE,
                       ISD::ATOMIC_LOAD,
                       ISD::ATOMIC_STORE,
                       ISD::ATOMIC_CMP_SWAP,
                       ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
                       ISD::ATOMIC_SWAP,
                       ISD::ATOMIC_LOAD_ADD,
                       ISD::ATOMIC_LOAD_SUB,
                       ISD::ATOMIC_LOAD_AND,
                       ISD::ATOMIC_LOAD_OR,
                       ISD::ATOMIC_LOAD_XOR,
                       ISD::ATOMIC_LOAD_NAND,
                       ISD::ATOMIC_LOAD_MIN,
                       ISD::ATOMIC_LOAD_MAX,
                       ISD::ATOMIC_LOAD_UMIN,
                       ISD::ATOMIC_LOAD_UMAX,
                       ISD::ATOMIC_LOAD_FADD,
                       ISD::INTRINSIC_VOID,
                       ISD::INTRINSIC_W_CHAIN});

  // FIXME: In other contexts we pretend this is a per-function property.
  setStackPointerRegisterToSaveRestore(AMDGPU::SGPR32);

  setSchedulingPreference(Sched::RegPressure);
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case where this is also OK to use with denormals
// enabled, and we don't currently handle it.
bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 &&
         SrcVT.getScalarType() == MVT::f16 &&
         // TODO: This probably only requires no input flushing?
         !hasFP32Denormals(DAG.getMachineFunction());
}

bool SITargetLowering::isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
                                       LLT DestTy, LLT SrcTy) const {
  return ((Opcode == TargetOpcode::G_FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == TargetOpcode::G_FMA && Subtarget->hasFmaMixInsts())) &&
         DestTy.getScalarSizeInBits() == 32 &&
         SrcTy.getScalarSizeInBits() == 16 &&
         // TODO: This probably only requires no input flushing?
         !hasFP32Denormals(*MI.getMF());
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 16) {
      if (Subtarget->has16BitInsts())
        return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      return VT.isInteger() ? MVT::i32 : MVT::f32;
    }

    if (Size < 16)
      return Subtarget->has16BitInsts() ? MVT::i16 : MVT::i32;
    return Size == 32 ? ScalarVT.getSimpleVT() : MVT::i32;
  }

  if (VT.getSizeInBits() > 32)
    return MVT::i32;

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    // FIXME: Should probably promote 8-bit vectors to i16.
    if (Size == 16 && Subtarget->has16BitInsts())
      return (NumElts + 1) / 2;

    if (Size <= 32)
      return NumElts;

    if (Size > 32)
      return NumElts * ((Size + 31) / 32);
  } else if (VT.getSizeInBits() > 32)
    return (VT.getSizeInBits() + 31) / 32;

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC,
    EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts()) {
      RegisterVT = VT.isInteger() ?
MVT::v2i16 : MVT::v2f16; 878 IntermediateVT = RegisterVT; 879 NumIntermediates = (NumElts + 1) / 2; 880 return NumIntermediates; 881 } 882 883 if (Size == 32) { 884 RegisterVT = ScalarVT.getSimpleVT(); 885 IntermediateVT = RegisterVT; 886 NumIntermediates = NumElts; 887 return NumIntermediates; 888 } 889 890 if (Size < 16 && Subtarget->has16BitInsts()) { 891 // FIXME: Should probably form v2i16 pieces 892 RegisterVT = MVT::i16; 893 IntermediateVT = ScalarVT; 894 NumIntermediates = NumElts; 895 return NumIntermediates; 896 } 897 898 899 if (Size != 16 && Size <= 32) { 900 RegisterVT = MVT::i32; 901 IntermediateVT = ScalarVT; 902 NumIntermediates = NumElts; 903 return NumIntermediates; 904 } 905 906 if (Size > 32) { 907 RegisterVT = MVT::i32; 908 IntermediateVT = RegisterVT; 909 NumIntermediates = NumElts * ((Size + 31) / 32); 910 return NumIntermediates; 911 } 912 } 913 914 return TargetLowering::getVectorTypeBreakdownForCallingConv( 915 Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT); 916 } 917 918 static EVT memVTFromImageData(Type *Ty, unsigned DMaskLanes) { 919 assert(DMaskLanes != 0); 920 921 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) { 922 unsigned NumElts = std::min(DMaskLanes, VT->getNumElements()); 923 return EVT::getVectorVT(Ty->getContext(), 924 EVT::getEVT(VT->getElementType()), 925 NumElts); 926 } 927 928 return EVT::getEVT(Ty); 929 } 930 931 // Peek through TFE struct returns to only use the data size. 932 static EVT memVTFromImageReturn(Type *Ty, unsigned DMaskLanes) { 933 auto *ST = dyn_cast<StructType>(Ty); 934 if (!ST) 935 return memVTFromImageData(Ty, DMaskLanes); 936 937 // Some intrinsics return an aggregate type - special case to work out the 938 // correct memVT. 939 // 940 // Only limited forms of aggregate type currently expected. 941 if (ST->getNumContainedTypes() != 2 || 942 !ST->getContainedType(1)->isIntegerTy(32)) 943 return EVT(); 944 return memVTFromImageData(ST->getContainedType(0), DMaskLanes); 945 } 946 947 bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 948 const CallInst &CI, 949 MachineFunction &MF, 950 unsigned IntrID) const { 951 Info.flags = MachineMemOperand::MONone; 952 if (CI.hasMetadata(LLVMContext::MD_invariant_load)) 953 Info.flags |= MachineMemOperand::MOInvariant; 954 955 if (const AMDGPU::RsrcIntrinsic *RsrcIntr = 956 AMDGPU::lookupRsrcIntrinsic(IntrID)) { 957 AttributeList Attr = Intrinsic::getAttributes(CI.getContext(), 958 (Intrinsic::ID)IntrID); 959 if (Attr.hasFnAttr(Attribute::ReadNone)) 960 return false; 961 962 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 963 964 const GCNTargetMachine &TM = 965 static_cast<const GCNTargetMachine &>(getTargetMachine()); 966 967 if (RsrcIntr->IsImage) { 968 Info.ptrVal = MFI->getImagePSV(TM); 969 Info.align.reset(); 970 } else { 971 Info.ptrVal = MFI->getBufferPSV(TM); 972 } 973 974 Info.flags |= MachineMemOperand::MODereferenceable; 975 if (Attr.hasFnAttr(Attribute::ReadOnly)) { 976 unsigned DMaskLanes = 4; 977 978 if (RsrcIntr->IsImage) { 979 const AMDGPU::ImageDimIntrinsicInfo *Intr 980 = AMDGPU::getImageDimIntrinsicInfo(IntrID); 981 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 982 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); 983 984 if (!BaseOpcode->Gather4) { 985 // If this isn't a gather, we may have excess loaded elements in the 986 // IR type. Check the dmask for the real number of elements loaded. 987 unsigned DMask 988 = cast<ConstantInt>(CI.getArgOperand(0))->getZExtValue(); 989 DMaskLanes = DMask == 0 ? 
1 : countPopulation(DMask); 990 } 991 992 Info.memVT = memVTFromImageReturn(CI.getType(), DMaskLanes); 993 } else 994 Info.memVT = EVT::getEVT(CI.getType()); 995 996 // FIXME: What does alignment mean for an image? 997 Info.opc = ISD::INTRINSIC_W_CHAIN; 998 Info.flags |= MachineMemOperand::MOLoad; 999 } else if (Attr.hasFnAttr(Attribute::WriteOnly)) { 1000 Info.opc = ISD::INTRINSIC_VOID; 1001 1002 Type *DataTy = CI.getArgOperand(0)->getType(); 1003 if (RsrcIntr->IsImage) { 1004 unsigned DMask = cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue(); 1005 unsigned DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask); 1006 Info.memVT = memVTFromImageData(DataTy, DMaskLanes); 1007 } else 1008 Info.memVT = EVT::getEVT(DataTy); 1009 1010 Info.flags |= MachineMemOperand::MOStore; 1011 } else { 1012 // Atomic 1013 Info.opc = CI.getType()->isVoidTy() ? ISD::INTRINSIC_VOID : 1014 ISD::INTRINSIC_W_CHAIN; 1015 Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType()); 1016 Info.flags |= MachineMemOperand::MOLoad | 1017 MachineMemOperand::MOStore | 1018 MachineMemOperand::MODereferenceable; 1019 1020 // XXX - Should this be volatile without known ordering? 1021 Info.flags |= MachineMemOperand::MOVolatile; 1022 1023 switch (IntrID) { 1024 default: 1025 break; 1026 case Intrinsic::amdgcn_raw_buffer_load_lds: 1027 case Intrinsic::amdgcn_struct_buffer_load_lds: { 1028 unsigned Width = cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue(); 1029 Info.memVT = EVT::getIntegerVT(CI.getContext(), Width * 8); 1030 return true; 1031 } 1032 } 1033 } 1034 return true; 1035 } 1036 1037 switch (IntrID) { 1038 case Intrinsic::amdgcn_atomic_inc: 1039 case Intrinsic::amdgcn_atomic_dec: 1040 case Intrinsic::amdgcn_ds_ordered_add: 1041 case Intrinsic::amdgcn_ds_ordered_swap: 1042 case Intrinsic::amdgcn_ds_fadd: 1043 case Intrinsic::amdgcn_ds_fmin: 1044 case Intrinsic::amdgcn_ds_fmax: { 1045 Info.opc = ISD::INTRINSIC_W_CHAIN; 1046 Info.memVT = MVT::getVT(CI.getType()); 1047 Info.ptrVal = CI.getOperand(0); 1048 Info.align.reset(); 1049 Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 1050 1051 const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4)); 1052 if (!Vol->isZero()) 1053 Info.flags |= MachineMemOperand::MOVolatile; 1054 1055 return true; 1056 } 1057 case Intrinsic::amdgcn_buffer_atomic_fadd: { 1058 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1059 1060 const GCNTargetMachine &TM = 1061 static_cast<const GCNTargetMachine &>(getTargetMachine()); 1062 1063 Info.opc = ISD::INTRINSIC_W_CHAIN; 1064 Info.memVT = MVT::getVT(CI.getOperand(0)->getType()); 1065 Info.ptrVal = MFI->getBufferPSV(TM); 1066 Info.align.reset(); 1067 Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 1068 1069 const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4)); 1070 if (!Vol || !Vol->isZero()) 1071 Info.flags |= MachineMemOperand::MOVolatile; 1072 1073 return true; 1074 } 1075 case Intrinsic::amdgcn_ds_append: 1076 case Intrinsic::amdgcn_ds_consume: { 1077 Info.opc = ISD::INTRINSIC_W_CHAIN; 1078 Info.memVT = MVT::getVT(CI.getType()); 1079 Info.ptrVal = CI.getOperand(0); 1080 Info.align.reset(); 1081 Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 1082 1083 const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1)); 1084 if (!Vol->isZero()) 1085 Info.flags |= MachineMemOperand::MOVolatile; 1086 1087 return true; 1088 } 1089 case Intrinsic::amdgcn_global_atomic_csub: { 1090 Info.opc = ISD::INTRINSIC_W_CHAIN; 1091 Info.memVT = MVT::getVT(CI.getType()); 
1092 Info.ptrVal = CI.getOperand(0); 1093 Info.align.reset(); 1094 Info.flags |= MachineMemOperand::MOLoad | 1095 MachineMemOperand::MOStore | 1096 MachineMemOperand::MOVolatile; 1097 return true; 1098 } 1099 case Intrinsic::amdgcn_image_bvh_intersect_ray: { 1100 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1101 Info.opc = ISD::INTRINSIC_W_CHAIN; 1102 Info.memVT = MVT::getVT(CI.getType()); // XXX: what is correct VT? 1103 1104 const GCNTargetMachine &TM = 1105 static_cast<const GCNTargetMachine &>(getTargetMachine()); 1106 1107 Info.ptrVal = MFI->getImagePSV(TM); 1108 Info.align.reset(); 1109 Info.flags |= MachineMemOperand::MOLoad | 1110 MachineMemOperand::MODereferenceable; 1111 return true; 1112 } 1113 case Intrinsic::amdgcn_global_atomic_fadd: 1114 case Intrinsic::amdgcn_global_atomic_fmin: 1115 case Intrinsic::amdgcn_global_atomic_fmax: 1116 case Intrinsic::amdgcn_flat_atomic_fadd: 1117 case Intrinsic::amdgcn_flat_atomic_fmin: 1118 case Intrinsic::amdgcn_flat_atomic_fmax: 1119 case Intrinsic::amdgcn_global_atomic_fadd_v2bf16: 1120 case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16: { 1121 Info.opc = ISD::INTRINSIC_W_CHAIN; 1122 Info.memVT = MVT::getVT(CI.getType()); 1123 Info.ptrVal = CI.getOperand(0); 1124 Info.align.reset(); 1125 Info.flags |= MachineMemOperand::MOLoad | 1126 MachineMemOperand::MOStore | 1127 MachineMemOperand::MODereferenceable | 1128 MachineMemOperand::MOVolatile; 1129 return true; 1130 } 1131 case Intrinsic::amdgcn_ds_gws_init: 1132 case Intrinsic::amdgcn_ds_gws_barrier: 1133 case Intrinsic::amdgcn_ds_gws_sema_v: 1134 case Intrinsic::amdgcn_ds_gws_sema_br: 1135 case Intrinsic::amdgcn_ds_gws_sema_p: 1136 case Intrinsic::amdgcn_ds_gws_sema_release_all: { 1137 Info.opc = ISD::INTRINSIC_VOID; 1138 1139 const GCNTargetMachine &TM = 1140 static_cast<const GCNTargetMachine &>(getTargetMachine()); 1141 1142 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1143 Info.ptrVal = MFI->getGWSPSV(TM); 1144 1145 // This is an abstract access, but we need to specify a type and size. 
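    // A nominal dword access keeps the operand's size and alignment well
    // defined for later passes.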
    Info.memVT = MVT::i32;
    Info.size = 4;
    Info.align = Align(4);

    if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
      Info.flags |= MachineMemOperand::MOLoad;
    else
      Info.flags |= MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::amdgcn_global_load_lds: {
    Info.opc = ISD::INTRINSIC_VOID;
    unsigned Width = cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
    Info.memVT = EVT::getIntegerVT(CI.getContext(), Width * 8);
    Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                  MachineMemOperand::MOVolatile;
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_append:
  case Intrinsic::amdgcn_ds_consume:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax:
  case Intrinsic::amdgcn_global_atomic_fadd:
  case Intrinsic::amdgcn_flat_atomic_fadd:
  case Intrinsic::amdgcn_flat_atomic_fmin:
  case Intrinsic::amdgcn_flat_atomic_fmax:
  case Intrinsic::amdgcn_global_atomic_fadd_v2bf16:
  case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16:
  case Intrinsic::amdgcn_global_atomic_csub: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  return AM.Scale == 0 &&
         (AM.BaseOffs == 0 ||
          Subtarget->getInstrInfo()->isLegalFLATOffset(
              AM.BaseOffs, AMDGPUAS::FLAT_ADDRESS, SIInstrFlags::FLAT));
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return AM.Scale == 0 &&
           (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset(
                                    AM.BaseOffs, AMDGPUAS::GLOBAL_ADDRESS,
                                    SIInstrFlags::FlatGlobal));

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffer < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options.
Depending on the resource constant, it can also do 1239 // (i64 r0) + (i32 r1) * (i14 i). 1240 // 1241 // Private arrays end up using a scratch buffer most of the time, so also 1242 // assume those use MUBUF instructions. Scratch loads / stores are currently 1243 // implemented as mubuf instructions with offen bit set, so slightly 1244 // different than the normal addr64. 1245 if (!SIInstrInfo::isLegalMUBUFImmOffset(AM.BaseOffs)) 1246 return false; 1247 1248 // FIXME: Since we can split immediate into soffset and immediate offset, 1249 // would it make sense to allow any immediate? 1250 1251 switch (AM.Scale) { 1252 case 0: // r + i or just i, depending on HasBaseReg. 1253 return true; 1254 case 1: 1255 return true; // We have r + r or r + i. 1256 case 2: 1257 if (AM.HasBaseReg) { 1258 // Reject 2 * r + r. 1259 return false; 1260 } 1261 1262 // Allow 2 * r as r + r 1263 // Or 2 * r + i is allowed as r + r + i. 1264 return true; 1265 default: // Don't allow n * r 1266 return false; 1267 } 1268 } 1269 1270 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL, 1271 const AddrMode &AM, Type *Ty, 1272 unsigned AS, Instruction *I) const { 1273 // No global is ever allowed as a base. 1274 if (AM.BaseGV) 1275 return false; 1276 1277 if (AS == AMDGPUAS::GLOBAL_ADDRESS) 1278 return isLegalGlobalAddressingMode(AM); 1279 1280 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 1281 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || 1282 AS == AMDGPUAS::BUFFER_FAT_POINTER) { 1283 // If the offset isn't a multiple of 4, it probably isn't going to be 1284 // correctly aligned. 1285 // FIXME: Can we get the real alignment here? 1286 if (AM.BaseOffs % 4 != 0) 1287 return isLegalMUBUFAddressingMode(AM); 1288 1289 // There are no SMRD extloads, so if we have to do a small type access we 1290 // will use a MUBUF load. 1291 // FIXME?: We also need to do this if unaligned, but we don't know the 1292 // alignment here. 1293 if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4) 1294 return isLegalGlobalAddressingMode(AM); 1295 1296 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { 1297 // SMRD instructions have an 8-bit, dword offset on SI. 1298 if (!isUInt<8>(AM.BaseOffs / 4)) 1299 return false; 1300 } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) { 1301 // On CI+, this can also be a 32-bit literal constant offset. If it fits 1302 // in 8-bits, it can use a smaller encoding. 1303 if (!isUInt<32>(AM.BaseOffs / 4)) 1304 return false; 1305 } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 1306 // On VI, these use the SMEM format and the offset is 20-bit in bytes. 1307 if (!isUInt<20>(AM.BaseOffs)) 1308 return false; 1309 } else 1310 llvm_unreachable("unhandled generation"); 1311 1312 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. 1313 return true; 1314 1315 if (AM.Scale == 1 && AM.HasBaseReg) 1316 return true; 1317 1318 return false; 1319 1320 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { 1321 return isLegalMUBUFAddressingMode(AM); 1322 } else if (AS == AMDGPUAS::LOCAL_ADDRESS || 1323 AS == AMDGPUAS::REGION_ADDRESS) { 1324 // Basic, single offset DS instructions allow a 16-bit unsigned immediate 1325 // field. 1326 // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have 1327 // an 8-bit dword offset but we don't know the alignment here. 1328 if (!isUInt<16>(AM.BaseOffs)) 1329 return false; 1330 1331 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. 
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
             AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  }

  // Assume a user alias of global for unknown address spaces.
  return isLegalGlobalAddressingMode(AM);
}

bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const MachineFunction &MF) const {
  if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
    unsigned Size, unsigned AddrSpace, Align Alignment,
    MachineMemOperand::Flags Flags, bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
    // Check if alignment requirements for ds_read/write instructions are
    // disabled.
    if (!Subtarget->hasUnalignedDSAccessEnabled() && Alignment < Align(4))
      return false;

    Align RequiredAlignment(PowerOf2Ceil(Size/8)); // Natural alignment.
    if (Subtarget->hasLDSMisalignedBug() && Size > 32 &&
        Alignment < RequiredAlignment)
      return false;

    // Either the alignment requirements are "enabled", or there is an
    // unaligned LDS access related hardware bug even though the alignment
    // requirements are "disabled". In either case, we need to check for
    // proper alignment.
    //
    switch (Size) {
    case 64:
      // SI has a hardware bug in the LDS / GDS bounds checking: if the base
      // address is negative, then the instruction is incorrectly treated as
      // out-of-bounds even if base + offsets is in bounds. Split vectorized
      // loads here to avoid emitting ds_read2_b32. We may re-combine the
      // load later in the SILoadStoreOptimizer.
      if (!Subtarget->hasUsableDSOffset() && Alignment < Align(8))
        return false;

      // 8-byte accesses via ds_read/write_b64 require 8-byte alignment, but
      // we can do a 4-byte aligned, 8-byte access in a single operation using
      // ds_read2/write2_b32 with adjacent offsets.
      RequiredAlignment = Align(4);

      if (Subtarget->hasUnalignedDSAccessEnabled()) {
        // We will either select ds_read_b64/ds_write_b64 or ds_read2_b32/
        // ds_write2_b32 depending on the alignment. In either case with either
        // alignment there is no faster way of doing this.
        if (IsFast)
          *IsFast = true;
        return true;
      }

      break;
    case 96:
      if (!Subtarget->hasDS96AndDS128())
        return false;

      // 12-byte accesses via ds_read/write_b96 require 16-byte alignment on
      // gfx8 and older.
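      // Unlike the 64-bit and 128-bit cases, there is no read2/write2 pairing
      // that covers 96 bits, so RequiredAlignment stays at the natural 16
      // bytes here.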

      if (Subtarget->hasUnalignedDSAccessEnabled()) {
        // Naturally aligned access is fastest. However, also report it as
        // fast if memory is aligned to less than a dword. A narrow load or
        // store will be just as slow as a single ds_read_b96/ds_write_b96,
        // but there will be more of them, so overall we pay less penalty by
        // issuing a single instruction.
        if (IsFast)
          *IsFast = Alignment >= RequiredAlignment || Alignment < Align(4);
        return true;
      }

      break;
    case 128:
      if (!Subtarget->hasDS96AndDS128() || !Subtarget->useDS128())
        return false;

      // 16-byte accesses via ds_read/write_b128 require 16-byte alignment on
      // gfx8 and older, but we can do an 8-byte aligned, 16-byte access in a
      // single operation using ds_read2/write2_b64.
      RequiredAlignment = Align(8);

      if (Subtarget->hasUnalignedDSAccessEnabled()) {
        // Naturally aligned access is fastest. However, also report it as
        // fast if memory is aligned to less than a dword. A narrow load or
        // store will be just as slow as a single ds_read_b128/ds_write_b128,
        // but there will be more of them, so overall we pay less penalty by
        // issuing a single instruction.
        if (IsFast)
          *IsFast = Alignment >= RequiredAlignment || Alignment < Align(4);
        return true;
      }

      break;
    default:
      if (Size > 32)
        return false;

      break;
    }

    if (IsFast)
      *IsFast = Alignment >= RequiredAlignment;

    return Alignment >= RequiredAlignment ||
           Subtarget->hasUnalignedDSAccessEnabled();
  }

  if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
    bool AlignedBy4 = Alignment >= Align(4);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4 ||
           Subtarget->enableFlatScratch() ||
           Subtarget->hasUnalignedScratchAccess();
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (AddrSpace == AMDGPUAS::FLAT_ADDRESS &&
      !Subtarget->hasUnalignedScratchAccess()) {
    bool AlignedBy4 = Alignment >= Align(4);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  if (Subtarget->hasUnalignedBufferAccessEnabled()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so
      // 2-byte alignment is worse than 1 unless doing a 2-byte access.
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
                 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
                    Alignment >= Align(4) : Alignment != Align(2);
    }

    return true;
  }

  // Smaller-than-dword values must be aligned.
  if (Size < 32)
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
1510 if (IsFast) 1511 *IsFast = true; 1512 1513 return Size >= 32 && Alignment >= Align(4); 1514 } 1515 1516 bool SITargetLowering::allowsMisalignedMemoryAccesses( 1517 EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, 1518 bool *IsFast) const { 1519 bool Allow = allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace, 1520 Alignment, Flags, IsFast); 1521 1522 if (Allow && IsFast && Subtarget->hasUnalignedDSAccessEnabled() && 1523 (AddrSpace == AMDGPUAS::LOCAL_ADDRESS || 1524 AddrSpace == AMDGPUAS::REGION_ADDRESS)) { 1525 // Lie it is fast if +unaligned-access-mode is passed so that DS accesses 1526 // get vectorized. We could use ds_read2_b*/ds_write2_b* instructions on a 1527 // misaligned data which is faster than a pair of ds_read_b*/ds_write_b* 1528 // which would be equally misaligned. 1529 // This is only used by the common passes, selection always calls the 1530 // allowsMisalignedMemoryAccessesImpl version. 1531 *IsFast = true; 1532 } 1533 1534 return Allow; 1535 } 1536 1537 EVT SITargetLowering::getOptimalMemOpType( 1538 const MemOp &Op, const AttributeList &FuncAttributes) const { 1539 // FIXME: Should account for address space here. 1540 1541 // The default fallback uses the private pointer size as a guess for a type to 1542 // use. Make sure we switch these to 64-bit accesses. 1543 1544 if (Op.size() >= 16 && 1545 Op.isDstAligned(Align(4))) // XXX: Should only do for global 1546 return MVT::v4i32; 1547 1548 if (Op.size() >= 8 && Op.isDstAligned(Align(4))) 1549 return MVT::v2i32; 1550 1551 // Use the default. 1552 return MVT::Other; 1553 } 1554 1555 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { 1556 const MemSDNode *MemNode = cast<MemSDNode>(N); 1557 return MemNode->getMemOperand()->getFlags() & MONoClobber; 1558 } 1559 1560 bool SITargetLowering::isNonGlobalAddrSpace(unsigned AS) { 1561 return AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS || 1562 AS == AMDGPUAS::PRIVATE_ADDRESS; 1563 } 1564 1565 bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS, 1566 unsigned DestAS) const { 1567 // Flat -> private/local is a simple truncate. 1568 // Flat -> global is no-op 1569 if (SrcAS == AMDGPUAS::FLAT_ADDRESS) 1570 return true; 1571 1572 const GCNTargetMachine &TM = 1573 static_cast<const GCNTargetMachine &>(getTargetMachine()); 1574 return TM.isNoopAddrSpaceCast(SrcAS, DestAS); 1575 } 1576 1577 bool SITargetLowering::isMemOpUniform(const SDNode *N) const { 1578 const MemSDNode *MemNode = cast<MemSDNode>(N); 1579 1580 return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand()); 1581 } 1582 1583 TargetLoweringBase::LegalizeTypeAction 1584 SITargetLowering::getPreferredVectorAction(MVT VT) const { 1585 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 && 1586 VT.getScalarType().bitsLE(MVT::i16)) 1587 return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector; 1588 return TargetLoweringBase::getPreferredVectorAction(VT); 1589 } 1590 1591 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 1592 Type *Ty) const { 1593 // FIXME: Could be smarter if called for vector constants. 1594 return true; 1595 } 1596 1597 bool SITargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, 1598 unsigned Index) const { 1599 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) 1600 return false; 1601 1602 // TODO: Add more cases that are cheap. 
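  // For now, only extraction at index 0 is considered cheap, since it does
  // not need to move any data.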
1603 return Index == 0; 1604 } 1605 1606 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { 1607 if (Subtarget->has16BitInsts() && VT == MVT::i16) { 1608 switch (Op) { 1609 case ISD::LOAD: 1610 case ISD::STORE: 1611 1612 // These operations are done with 32-bit instructions anyway. 1613 case ISD::AND: 1614 case ISD::OR: 1615 case ISD::XOR: 1616 case ISD::SELECT: 1617 // TODO: Extensions? 1618 return true; 1619 default: 1620 return false; 1621 } 1622 } 1623 1624 // SimplifySetCC uses this function to determine whether or not it should 1625 // create setcc with i1 operands. We don't have instructions for i1 setcc. 1626 if (VT == MVT::i1 && Op == ISD::SETCC) 1627 return false; 1628 1629 return TargetLowering::isTypeDesirableForOp(Op, VT); 1630 } 1631 1632 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG, 1633 const SDLoc &SL, 1634 SDValue Chain, 1635 uint64_t Offset) const { 1636 const DataLayout &DL = DAG.getDataLayout(); 1637 MachineFunction &MF = DAG.getMachineFunction(); 1638 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1639 1640 const ArgDescriptor *InputPtrReg; 1641 const TargetRegisterClass *RC; 1642 LLT ArgTy; 1643 MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); 1644 1645 std::tie(InputPtrReg, RC, ArgTy) = 1646 Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 1647 1648 // We may not have the kernarg segment argument if we have no kernel 1649 // arguments. 1650 if (!InputPtrReg) 1651 return DAG.getConstant(0, SL, PtrVT); 1652 1653 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1654 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, 1655 MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT); 1656 1657 return DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Offset)); 1658 } 1659 1660 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG, 1661 const SDLoc &SL) const { 1662 uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(), 1663 FIRST_IMPLICIT); 1664 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset); 1665 } 1666 1667 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT, 1668 const SDLoc &SL, SDValue Val, 1669 bool Signed, 1670 const ISD::InputArg *Arg) const { 1671 // First, if it is a widened vector, narrow it. 1672 if (VT.isVector() && 1673 VT.getVectorNumElements() != MemVT.getVectorNumElements()) { 1674 EVT NarrowedVT = 1675 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 1676 VT.getVectorNumElements()); 1677 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val, 1678 DAG.getConstant(0, SL, MVT::i32)); 1679 } 1680 1681 // Then convert the vector elements or scalar value. 1682 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) && 1683 VT.bitsLT(MemVT)) { 1684 unsigned Opc = Arg->Flags.isZExt() ? 
ISD::AssertZext : ISD::AssertSext; 1685 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT)); 1686 } 1687 1688 if (MemVT.isFloatingPoint()) 1689 Val = getFPExtOrFPRound(DAG, Val, SL, VT); 1690 else if (Signed) 1691 Val = DAG.getSExtOrTrunc(Val, SL, VT); 1692 else 1693 Val = DAG.getZExtOrTrunc(Val, SL, VT); 1694 1695 return Val; 1696 } 1697 1698 SDValue SITargetLowering::lowerKernargMemParameter( 1699 SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Chain, 1700 uint64_t Offset, Align Alignment, bool Signed, 1701 const ISD::InputArg *Arg) const { 1702 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); 1703 1704 // Try to avoid using an extload by loading earlier than the argument address, 1705 // and extracting the relevant bits. The load should hopefully be merged with 1706 // the previous argument. 1707 if (MemVT.getStoreSize() < 4 && Alignment < 4) { 1708 // TODO: Handle align < 4 and size >= 4 (can happen with packed structs). 1709 int64_t AlignDownOffset = alignDown(Offset, 4); 1710 int64_t OffsetDiff = Offset - AlignDownOffset; 1711 1712 EVT IntVT = MemVT.changeTypeToInteger(); 1713 1714 // TODO: If we passed in the base kernel offset we could have a better 1715 // alignment than 4, but we don't really need it. 1716 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset); 1717 SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, Align(4), 1718 MachineMemOperand::MODereferenceable | 1719 MachineMemOperand::MOInvariant); 1720 1721 SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32); 1722 SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt); 1723 1724 SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract); 1725 ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal); 1726 ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg); 1727 1728 1729 return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL); 1730 } 1731 1732 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset); 1733 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Alignment, 1734 MachineMemOperand::MODereferenceable | 1735 MachineMemOperand::MOInvariant); 1736 1737 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg); 1738 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL); 1739 } 1740 1741 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA, 1742 const SDLoc &SL, SDValue Chain, 1743 const ISD::InputArg &Arg) const { 1744 MachineFunction &MF = DAG.getMachineFunction(); 1745 MachineFrameInfo &MFI = MF.getFrameInfo(); 1746 1747 if (Arg.Flags.isByVal()) { 1748 unsigned Size = Arg.Flags.getByValSize(); 1749 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false); 1750 return DAG.getFrameIndex(FrameIdx, MVT::i32); 1751 } 1752 1753 unsigned ArgOffset = VA.getLocMemOffset(); 1754 unsigned ArgSize = VA.getValVT().getStoreSize(); 1755 1756 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true); 1757 1758 // Create load nodes to retrieve arguments from the stack. 
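// The extension kind chosen below mirrors how the caller promoted the value:
// SExt, ZExt and AExt locations become sign-, zero- and any-extending loads.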
1759 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); 1760 SDValue ArgValue; 1761 1762 // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT) 1763 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; 1764 MVT MemVT = VA.getValVT(); 1765 1766 switch (VA.getLocInfo()) { 1767 default: 1768 break; 1769 case CCValAssign::BCvt: 1770 MemVT = VA.getLocVT(); 1771 break; 1772 case CCValAssign::SExt: 1773 ExtType = ISD::SEXTLOAD; 1774 break; 1775 case CCValAssign::ZExt: 1776 ExtType = ISD::ZEXTLOAD; 1777 break; 1778 case CCValAssign::AExt: 1779 ExtType = ISD::EXTLOAD; 1780 break; 1781 } 1782 1783 ArgValue = DAG.getExtLoad( 1784 ExtType, SL, VA.getLocVT(), Chain, FIN, 1785 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), 1786 MemVT); 1787 return ArgValue; 1788 } 1789 1790 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG, 1791 const SIMachineFunctionInfo &MFI, 1792 EVT VT, 1793 AMDGPUFunctionArgInfo::PreloadedValue PVID) const { 1794 const ArgDescriptor *Reg; 1795 const TargetRegisterClass *RC; 1796 LLT Ty; 1797 1798 std::tie(Reg, RC, Ty) = MFI.getPreloadedValue(PVID); 1799 if (!Reg) { 1800 if (PVID == AMDGPUFunctionArgInfo::PreloadedValue::KERNARG_SEGMENT_PTR) { 1801 // It's possible for a kernarg intrinsic call to appear in a kernel with 1802 // no allocated segment, in which case we do not add the user sgpr 1803 // argument, so just return null. 1804 return DAG.getConstant(0, SDLoc(), VT); 1805 } 1806 1807 // It's undefined behavior if a function marked with the amdgpu-no-* 1808 // attributes uses the corresponding intrinsic. 1809 return DAG.getUNDEF(VT); 1810 } 1811 1812 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT); 1813 } 1814 1815 static void processPSInputArgs(SmallVectorImpl<ISD::InputArg> &Splits, 1816 CallingConv::ID CallConv, 1817 ArrayRef<ISD::InputArg> Ins, BitVector &Skipped, 1818 FunctionType *FType, 1819 SIMachineFunctionInfo *Info) { 1820 for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) { 1821 const ISD::InputArg *Arg = &Ins[I]; 1822 1823 assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) && 1824 "vector type argument should have been split"); 1825 1826 // First check if it's a PS input addr. 1827 if (CallConv == CallingConv::AMDGPU_PS && 1828 !Arg->Flags.isInReg() && PSInputNum <= 15) { 1829 bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum); 1830 1831 // Inconveniently only the first part of the split is marked as isSplit, 1832 // so skip to the end. We only want to increment PSInputNum once for the 1833 // entire split argument. 1834 if (Arg->Flags.isSplit()) { 1835 while (!Arg->Flags.isSplitEnd()) { 1836 assert((!Arg->VT.isVector() || 1837 Arg->VT.getScalarSizeInBits() == 16) && 1838 "unexpected vector split in ps argument type"); 1839 if (!SkipArg) 1840 Splits.push_back(*Arg); 1841 Arg = &Ins[++I]; 1842 } 1843 } 1844 1845 if (SkipArg) { 1846 // We can safely skip PS inputs. 1847 Skipped.set(Arg->getOrigArgIndex()); 1848 ++PSInputNum; 1849 continue; 1850 } 1851 1852 Info->markPSInputAllocated(PSInputNum); 1853 if (Arg->Used) 1854 Info->markPSInputEnabled(PSInputNum); 1855 1856 ++PSInputNum; 1857 } 1858 1859 Splits.push_back(*Arg); 1860 } 1861 } 1862 1863 // Allocate special inputs passed in VGPRs. 
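// On subtargets with a packed TID the X, Y and Z workitem IDs share VGPR0 as
// 10-bit fields (bits 0-9, 10-19 and 20-29); otherwise they arrive unpacked in
// VGPR0, VGPR1 and VGPR2, as the masks and registers below show.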
1864 void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo, 1865 MachineFunction &MF, 1866 const SIRegisterInfo &TRI, 1867 SIMachineFunctionInfo &Info) const { 1868 const LLT S32 = LLT::scalar(32); 1869 MachineRegisterInfo &MRI = MF.getRegInfo(); 1870 1871 if (Info.hasWorkItemIDX()) { 1872 Register Reg = AMDGPU::VGPR0; 1873 MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); 1874 1875 CCInfo.AllocateReg(Reg); 1876 unsigned Mask = (Subtarget->hasPackedTID() && 1877 Info.hasWorkItemIDY()) ? 0x3ff : ~0u; 1878 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask)); 1879 } 1880 1881 if (Info.hasWorkItemIDY()) { 1882 assert(Info.hasWorkItemIDX()); 1883 if (Subtarget->hasPackedTID()) { 1884 Info.setWorkItemIDY(ArgDescriptor::createRegister(AMDGPU::VGPR0, 1885 0x3ff << 10)); 1886 } else { 1887 unsigned Reg = AMDGPU::VGPR1; 1888 MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); 1889 1890 CCInfo.AllocateReg(Reg); 1891 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg)); 1892 } 1893 } 1894 1895 if (Info.hasWorkItemIDZ()) { 1896 assert(Info.hasWorkItemIDX() && Info.hasWorkItemIDY()); 1897 if (Subtarget->hasPackedTID()) { 1898 Info.setWorkItemIDZ(ArgDescriptor::createRegister(AMDGPU::VGPR0, 1899 0x3ff << 20)); 1900 } else { 1901 unsigned Reg = AMDGPU::VGPR2; 1902 MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); 1903 1904 CCInfo.AllocateReg(Reg); 1905 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg)); 1906 } 1907 } 1908 } 1909 1910 // Try to allocate a VGPR at the end of the argument list, or if no argument 1911 // VGPRs are left, allocate a stack slot instead. 1912 // If \p Mask is given it indicates the bitfield position in the register. 1913 // If \p Arg is given, use it with the new \p Mask instead of allocating a new one. 1914 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u, 1915 ArgDescriptor Arg = ArgDescriptor()) { 1916 if (Arg.isSet()) 1917 return ArgDescriptor::createArg(Arg, Mask); 1918 1919 ArrayRef<MCPhysReg> ArgVGPRs 1920 = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32); 1921 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs); 1922 if (RegIdx == ArgVGPRs.size()) { 1923 // Spill to stack required. 1924 int64_t Offset = CCInfo.AllocateStack(4, Align(4)); 1925 1926 return ArgDescriptor::createStack(Offset, Mask); 1927 } 1928 1929 unsigned Reg = ArgVGPRs[RegIdx]; 1930 Reg = CCInfo.AllocateReg(Reg); 1931 assert(Reg != AMDGPU::NoRegister); 1932 1933 MachineFunction &MF = CCInfo.getMachineFunction(); 1934 Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1935 MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32)); 1936 return ArgDescriptor::createRegister(Reg, Mask); 1937 } 1938 1939 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo, 1940 const TargetRegisterClass *RC, 1941 unsigned NumArgRegs) { 1942 ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32); 1943 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs); 1944 if (RegIdx == ArgSGPRs.size()) 1945 report_fatal_error("ran out of SGPRs for arguments"); 1946 1947 unsigned Reg = ArgSGPRs[RegIdx]; 1948 Reg = CCInfo.AllocateReg(Reg); 1949 assert(Reg != AMDGPU::NoRegister); 1950 1951 MachineFunction &MF = CCInfo.getMachineFunction(); 1952 MF.addLiveIn(Reg, RC); 1953 return ArgDescriptor::createRegister(Reg); 1954 } 1955 1956 // If this has a fixed position, we still should allocate the register in the 1957 // CCInfo state. Technically we could get away with this for values passed 1958 // outside of the normal argument range.
1959 static void allocateFixedSGPRInputImpl(CCState &CCInfo, 1960 const TargetRegisterClass *RC, 1961 MCRegister Reg) { 1962 Reg = CCInfo.AllocateReg(Reg); 1963 assert(Reg != AMDGPU::NoRegister); 1964 MachineFunction &MF = CCInfo.getMachineFunction(); 1965 MF.addLiveIn(Reg, RC); 1966 } 1967 1968 static void allocateSGPR32Input(CCState &CCInfo, ArgDescriptor &Arg) { 1969 if (Arg) { 1970 allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 1971 Arg.getRegister()); 1972 } else 1973 Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32); 1974 } 1975 1976 static void allocateSGPR64Input(CCState &CCInfo, ArgDescriptor &Arg) { 1977 if (Arg) { 1978 allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 1979 Arg.getRegister()); 1980 } else 1981 Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16); 1982 } 1983 1984 /// Allocate implicit function VGPR arguments at the end of allocated user 1985 /// arguments. 1986 void SITargetLowering::allocateSpecialInputVGPRs( 1987 CCState &CCInfo, MachineFunction &MF, 1988 const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { 1989 const unsigned Mask = 0x3ff; 1990 ArgDescriptor Arg; 1991 1992 if (Info.hasWorkItemIDX()) { 1993 Arg = allocateVGPR32Input(CCInfo, Mask); 1994 Info.setWorkItemIDX(Arg); 1995 } 1996 1997 if (Info.hasWorkItemIDY()) { 1998 Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg); 1999 Info.setWorkItemIDY(Arg); 2000 } 2001 2002 if (Info.hasWorkItemIDZ()) 2003 Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg)); 2004 } 2005 2006 /// Allocate implicit function VGPR arguments in fixed registers. 2007 void SITargetLowering::allocateSpecialInputVGPRsFixed( 2008 CCState &CCInfo, MachineFunction &MF, 2009 const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { 2010 Register Reg = CCInfo.AllocateReg(AMDGPU::VGPR31); 2011 if (!Reg) 2012 report_fatal_error("failed to allocated VGPR for implicit arguments"); 2013 2014 const unsigned Mask = 0x3ff; 2015 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask)); 2016 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg, Mask << 10)); 2017 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg, Mask << 20)); 2018 } 2019 2020 void SITargetLowering::allocateSpecialInputSGPRs( 2021 CCState &CCInfo, 2022 MachineFunction &MF, 2023 const SIRegisterInfo &TRI, 2024 SIMachineFunctionInfo &Info) const { 2025 auto &ArgInfo = Info.getArgInfo(); 2026 2027 // TODO: Unify handling with private memory pointers. 2028 if (Info.hasDispatchPtr()) 2029 allocateSGPR64Input(CCInfo, ArgInfo.DispatchPtr); 2030 2031 if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5) 2032 allocateSGPR64Input(CCInfo, ArgInfo.QueuePtr); 2033 2034 // Implicit arg ptr takes the place of the kernarg segment pointer. This is a 2035 // constant offset from the kernarg segment. 2036 if (Info.hasImplicitArgPtr()) 2037 allocateSGPR64Input(CCInfo, ArgInfo.ImplicitArgPtr); 2038 2039 if (Info.hasDispatchID()) 2040 allocateSGPR64Input(CCInfo, ArgInfo.DispatchID); 2041 2042 // flat_scratch_init is not applicable for non-kernel functions. 2043 2044 if (Info.hasWorkGroupIDX()) 2045 allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDX); 2046 2047 if (Info.hasWorkGroupIDY()) 2048 allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDY); 2049 2050 if (Info.hasWorkGroupIDZ()) 2051 allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDZ); 2052 } 2053 2054 // Allocate special inputs passed in user SGPRs. 
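// The checks below claim user SGPRs in order: implicit buffer pointer, private
// segment buffer, dispatch pointer, queue pointer, kernarg segment pointer,
// dispatch ID and flat scratch init.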
2055 void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo, 2056 MachineFunction &MF, 2057 const SIRegisterInfo &TRI, 2058 SIMachineFunctionInfo &Info) const { 2059 if (Info.hasImplicitBufferPtr()) { 2060 Register ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI); 2061 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); 2062 CCInfo.AllocateReg(ImplicitBufferPtrReg); 2063 } 2064 2065 // FIXME: How should these inputs interact with inreg / custom SGPR inputs? 2066 if (Info.hasPrivateSegmentBuffer()) { 2067 Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); 2068 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); 2069 CCInfo.AllocateReg(PrivateSegmentBufferReg); 2070 } 2071 2072 if (Info.hasDispatchPtr()) { 2073 Register DispatchPtrReg = Info.addDispatchPtr(TRI); 2074 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); 2075 CCInfo.AllocateReg(DispatchPtrReg); 2076 } 2077 2078 if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5) { 2079 Register QueuePtrReg = Info.addQueuePtr(TRI); 2080 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); 2081 CCInfo.AllocateReg(QueuePtrReg); 2082 } 2083 2084 if (Info.hasKernargSegmentPtr()) { 2085 MachineRegisterInfo &MRI = MF.getRegInfo(); 2086 Register InputPtrReg = Info.addKernargSegmentPtr(TRI); 2087 CCInfo.AllocateReg(InputPtrReg); 2088 2089 Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass); 2090 MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64)); 2091 } 2092 2093 if (Info.hasDispatchID()) { 2094 Register DispatchIDReg = Info.addDispatchID(TRI); 2095 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); 2096 CCInfo.AllocateReg(DispatchIDReg); 2097 } 2098 2099 if (Info.hasFlatScratchInit() && !getSubtarget()->isAmdPalOS()) { 2100 Register FlatScratchInitReg = Info.addFlatScratchInit(TRI); 2101 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); 2102 CCInfo.AllocateReg(FlatScratchInitReg); 2103 } 2104 2105 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read 2106 // these from the dispatch pointer. 2107 } 2108 2109 // Allocate special input registers that are initialized per-wave. 2110 void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo, 2111 MachineFunction &MF, 2112 SIMachineFunctionInfo &Info, 2113 CallingConv::ID CallConv, 2114 bool IsShader) const { 2115 if (Subtarget->hasUserSGPRInit16Bug()) { 2116 // Pad up the used user SGPRs with dead inputs. 2117 unsigned CurrentUserSGPRs = Info.getNumUserSGPRs(); 2118 2119 // Note we do not count the PrivateSegmentWaveByteOffset. We do not want to 2120 // rely on it to reach 16 since if we end up having no stack usage, it will 2121 // not really be added. 
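// For example, with 6 user SGPRs already in use and only workgroup IDs X and Y
// required (2 system SGPRs), the loop below adds 8 dead user SGPRs so the
// total reaches 16.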
2122 unsigned NumRequiredSystemSGPRs = Info.hasWorkGroupIDX() + 2123 Info.hasWorkGroupIDY() + 2124 Info.hasWorkGroupIDZ() + 2125 Info.hasWorkGroupInfo(); 2126 for (unsigned i = NumRequiredSystemSGPRs + CurrentUserSGPRs; i < 16; ++i) { 2127 Register Reg = Info.addReservedUserSGPR(); 2128 MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); 2129 CCInfo.AllocateReg(Reg); 2130 } 2131 } 2132 2133 if (Info.hasWorkGroupIDX()) { 2134 Register Reg = Info.addWorkGroupIDX(); 2135 MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); 2136 CCInfo.AllocateReg(Reg); 2137 } 2138 2139 if (Info.hasWorkGroupIDY()) { 2140 Register Reg = Info.addWorkGroupIDY(); 2141 MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); 2142 CCInfo.AllocateReg(Reg); 2143 } 2144 2145 if (Info.hasWorkGroupIDZ()) { 2146 Register Reg = Info.addWorkGroupIDZ(); 2147 MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); 2148 CCInfo.AllocateReg(Reg); 2149 } 2150 2151 if (Info.hasWorkGroupInfo()) { 2152 Register Reg = Info.addWorkGroupInfo(); 2153 MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); 2154 CCInfo.AllocateReg(Reg); 2155 } 2156 2157 if (Info.hasPrivateSegmentWaveByteOffset()) { 2158 // Scratch wave offset passed in system SGPR. 2159 unsigned PrivateSegmentWaveByteOffsetReg; 2160 2161 if (IsShader) { 2162 PrivateSegmentWaveByteOffsetReg = 2163 Info.getPrivateSegmentWaveByteOffsetSystemSGPR(); 2164 2165 // This is true if the scratch wave byte offset doesn't have a fixed 2166 // location. 2167 if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) { 2168 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo); 2169 Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg); 2170 } 2171 } else 2172 PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset(); 2173 2174 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass); 2175 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg); 2176 } 2177 2178 assert(!Subtarget->hasUserSGPRInit16Bug() || Info.getNumPreloadedSGPRs() >= 16); 2179 } 2180 2181 static void reservePrivateMemoryRegs(const TargetMachine &TM, 2182 MachineFunction &MF, 2183 const SIRegisterInfo &TRI, 2184 SIMachineFunctionInfo &Info) { 2185 // Now that we've figured out where the scratch register inputs are, see if 2186 // should reserve the arguments and use them directly. 2187 MachineFrameInfo &MFI = MF.getFrameInfo(); 2188 bool HasStackObjects = MFI.hasStackObjects(); 2189 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 2190 2191 // Record that we know we have non-spill stack objects so we don't need to 2192 // check all stack objects later. 2193 if (HasStackObjects) 2194 Info.setHasNonSpillStackObjects(true); 2195 2196 // Everything live out of a block is spilled with fast regalloc, so it's 2197 // almost certain that spilling will be required. 2198 if (TM.getOptLevel() == CodeGenOpt::None) 2199 HasStackObjects = true; 2200 2201 // For now assume stack access is needed in any callee functions, so we need 2202 // the scratch registers to pass in. 2203 bool RequiresStackAccess = HasStackObjects || MFI.hasCalls(); 2204 2205 if (!ST.enableFlatScratch()) { 2206 if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) { 2207 // If we have stack objects, we unquestionably need the private buffer 2208 // resource. For the Code Object V2 ABI, this will be the first 4 user 2209 // SGPR inputs. We can reserve those and use them directly. 
2210 2211 Register PrivateSegmentBufferReg = 2212 Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER); 2213 Info.setScratchRSrcReg(PrivateSegmentBufferReg); 2214 } else { 2215 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); 2216 // We tentatively reserve the last registers (skipping the last registers 2217 // which may contain VCC, FLAT_SCR, and XNACK). After register allocation, 2218 // we'll replace these with the ones immediately after those which were 2219 // really allocated. In the prologue copies will be inserted from the 2220 // argument to these reserved registers. 2221 2222 // Without HSA, relocations are used for the scratch pointer and the 2223 // buffer resource setup is always inserted in the prologue. Scratch wave 2224 // offset is still in an input SGPR. 2225 Info.setScratchRSrcReg(ReservedBufferReg); 2226 } 2227 } 2228 2229 MachineRegisterInfo &MRI = MF.getRegInfo(); 2230 2231 // For entry functions we have to set up the stack pointer if we use it, 2232 // whereas non-entry functions get this "for free". This means there is no 2233 // intrinsic advantage to using S32 over S34 in cases where we do not have 2234 // calls but do need a frame pointer (i.e. if we are requested to have one 2235 // because frame pointer elimination is disabled). To keep things simple we 2236 // only ever use S32 as the call ABI stack pointer, and so using it does not 2237 // imply we need a separate frame pointer. 2238 // 2239 // Try to use s32 as the SP, but move it if it would interfere with input 2240 // arguments. This won't work with calls though. 2241 // 2242 // FIXME: Move SP to avoid any possible inputs, or find a way to spill input 2243 // registers. 2244 if (!MRI.isLiveIn(AMDGPU::SGPR32)) { 2245 Info.setStackPtrOffsetReg(AMDGPU::SGPR32); 2246 } else { 2247 assert(AMDGPU::isShader(MF.getFunction().getCallingConv())); 2248 2249 if (MFI.hasCalls()) 2250 report_fatal_error("call in graphics shader with too many input SGPRs"); 2251 2252 for (unsigned Reg : AMDGPU::SGPR_32RegClass) { 2253 if (!MRI.isLiveIn(Reg)) { 2254 Info.setStackPtrOffsetReg(Reg); 2255 break; 2256 } 2257 } 2258 2259 if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG) 2260 report_fatal_error("failed to find register for SP"); 2261 } 2262 2263 // hasFP should be accurate for entry functions even before the frame is 2264 // finalized, because it does not rely on the known stack size, only 2265 // properties like whether variable sized objects are present. 
2266 if (ST.getFrameLowering()->hasFP(MF)) { 2267 Info.setFrameOffsetReg(AMDGPU::SGPR33); 2268 } 2269 } 2270 2271 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { 2272 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 2273 return !Info->isEntryFunction(); 2274 } 2275 2276 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 2277 2278 } 2279 2280 void SITargetLowering::insertCopiesSplitCSR( 2281 MachineBasicBlock *Entry, 2282 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 2283 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2284 2285 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 2286 if (!IStart) 2287 return; 2288 2289 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 2290 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 2291 MachineBasicBlock::iterator MBBI = Entry->begin(); 2292 for (const MCPhysReg *I = IStart; *I; ++I) { 2293 const TargetRegisterClass *RC = nullptr; 2294 if (AMDGPU::SReg_64RegClass.contains(*I)) 2295 RC = &AMDGPU::SGPR_64RegClass; 2296 else if (AMDGPU::SReg_32RegClass.contains(*I)) 2297 RC = &AMDGPU::SGPR_32RegClass; 2298 else 2299 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 2300 2301 Register NewVR = MRI->createVirtualRegister(RC); 2302 // Create copy from CSR to a virtual register. 2303 Entry->addLiveIn(*I); 2304 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 2305 .addReg(*I); 2306 2307 // Insert the copy-back instructions right before the terminator. 2308 for (auto *Exit : Exits) 2309 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 2310 TII->get(TargetOpcode::COPY), *I) 2311 .addReg(NewVR); 2312 } 2313 } 2314 2315 SDValue SITargetLowering::LowerFormalArguments( 2316 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 2317 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 2318 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 2319 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2320 2321 MachineFunction &MF = DAG.getMachineFunction(); 2322 const Function &Fn = MF.getFunction(); 2323 FunctionType *FType = MF.getFunction().getFunctionType(); 2324 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2325 2326 if (Subtarget->isAmdHsaOS() && AMDGPU::isGraphics(CallConv)) { 2327 DiagnosticInfoUnsupported NoGraphicsHSA( 2328 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); 2329 DAG.getContext()->diagnose(NoGraphicsHSA); 2330 return DAG.getEntryNode(); 2331 } 2332 2333 Info->allocateModuleLDSGlobal(Fn); 2334 2335 SmallVector<ISD::InputArg, 16> Splits; 2336 SmallVector<CCValAssign, 16> ArgLocs; 2337 BitVector Skipped(Ins.size()); 2338 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2339 *DAG.getContext()); 2340 2341 bool IsGraphics = AMDGPU::isGraphics(CallConv); 2342 bool IsKernel = AMDGPU::isKernel(CallConv); 2343 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv); 2344 2345 if (IsGraphics) { 2346 assert(!Info->hasDispatchPtr() && !Info->hasKernargSegmentPtr() && 2347 (!Info->hasFlatScratchInit() || Subtarget->enableFlatScratch()) && 2348 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && 2349 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && 2350 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && 2351 !Info->hasWorkItemIDZ()); 2352 } 2353 2354 if (CallConv == CallingConv::AMDGPU_PS) { 2355 processPSInputArgs(Splits, CallConv, Ins, Skipped, FType, Info); 2356 2357 // At 
least one interpolation mode must be enabled or else the GPU will 2358 // hang. 2359 // 2360 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user 2361 // sets PSInputAddr, they want to enable some bits after compilation 2362 // based on run-time states. Since we can't know what the final PSInputEna 2363 // will look like, we shouldn't do anything here and the user should take 2364 // responsibility for the correct programming. 2365 // 2366 // Otherwise, the following restrictions apply: 2367 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. 2368 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be 2369 // enabled too. 2370 if ((Info->getPSInputAddr() & 0x7F) == 0 || 2371 ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11))) { 2372 CCInfo.AllocateReg(AMDGPU::VGPR0); 2373 CCInfo.AllocateReg(AMDGPU::VGPR1); 2374 Info->markPSInputAllocated(0); 2375 Info->markPSInputEnabled(0); 2376 } 2377 if (Subtarget->isAmdPalOS()) { 2378 // For isAmdPalOS, the user does not enable some bits after compilation 2379 // based on run-time states; the register values being generated here are 2380 // the final ones set in hardware. Therefore we need to apply the 2381 // workaround to PSInputAddr and PSInputEnable together. (The case where 2382 // a bit is set in PSInputAddr but not PSInputEnable is where the 2383 // frontend set up an input arg for a particular interpolation mode, but 2384 // nothing uses that input arg. Really we should have an earlier pass 2385 // that removes such an arg.) 2386 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); 2387 if ((PsInputBits & 0x7F) == 0 || 2388 ((PsInputBits & 0xF) == 0 && (PsInputBits >> 11 & 1))) 2389 Info->markPSInputEnabled( 2390 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); 2391 } 2392 } else if (IsKernel) { 2393 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); 2394 } else { 2395 Splits.append(Ins.begin(), Ins.end()); 2396 } 2397 2398 if (IsEntryFunc) { 2399 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); 2400 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info); 2401 } else if (!IsGraphics) { 2402 // For the fixed ABI, pass workitem IDs in the last argument register. 2403 allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info); 2404 } 2405 2406 if (IsKernel) { 2407 analyzeFormalArgumentsCompute(CCInfo, Ins); 2408 } else { 2409 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg); 2410 CCInfo.AnalyzeFormalArguments(Splits, AssignFn); 2411 } 2412 2413 SmallVector<SDValue, 16> Chains; 2414 2415 // FIXME: This is the minimum kernel argument alignment. We should improve 2416 // this to the maximum alignment of the arguments. 2417 // 2418 // FIXME: Alignment of explicit arguments totally broken with non-0 explicit 2419 // kern arg offset.
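// commonAlignment(Align(16), Offset) below resolves to the largest power of
// two dividing the argument offset, capped at 16; e.g. an argument at offset 4
// is loaded with Align(4) and one at offset 32 with Align(16).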
2420 const Align KernelArgBaseAlign = Align(16); 2421 2422 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { 2423 const ISD::InputArg &Arg = Ins[i]; 2424 if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) { 2425 InVals.push_back(DAG.getUNDEF(Arg.VT)); 2426 continue; 2427 } 2428 2429 CCValAssign &VA = ArgLocs[ArgIdx++]; 2430 MVT VT = VA.getLocVT(); 2431 2432 if (IsEntryFunc && VA.isMemLoc()) { 2433 VT = Ins[i].VT; 2434 EVT MemVT = VA.getLocVT(); 2435 2436 const uint64_t Offset = VA.getLocMemOffset(); 2437 Align Alignment = commonAlignment(KernelArgBaseAlign, Offset); 2438 2439 if (Arg.Flags.isByRef()) { 2440 SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, Chain, Offset); 2441 2442 const GCNTargetMachine &TM = 2443 static_cast<const GCNTargetMachine &>(getTargetMachine()); 2444 if (!TM.isNoopAddrSpaceCast(AMDGPUAS::CONSTANT_ADDRESS, 2445 Arg.Flags.getPointerAddrSpace())) { 2446 Ptr = DAG.getAddrSpaceCast(DL, VT, Ptr, AMDGPUAS::CONSTANT_ADDRESS, 2447 Arg.Flags.getPointerAddrSpace()); 2448 } 2449 2450 InVals.push_back(Ptr); 2451 continue; 2452 } 2453 2454 SDValue Arg = lowerKernargMemParameter( 2455 DAG, VT, MemVT, DL, Chain, Offset, Alignment, Ins[i].Flags.isSExt(), &Ins[i]); 2456 Chains.push_back(Arg.getValue(1)); 2457 2458 auto *ParamTy = 2459 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); 2460 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && 2461 ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || 2462 ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) { 2463 // On SI local pointers are just offsets into LDS, so they are always 2464 // less than 16-bits. On CI and newer they could potentially be 2465 // real pointers, so we can't guarantee their size. 2466 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, 2467 DAG.getValueType(MVT::i16)); 2468 } 2469 2470 InVals.push_back(Arg); 2471 continue; 2472 } else if (!IsEntryFunc && VA.isMemLoc()) { 2473 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg); 2474 InVals.push_back(Val); 2475 if (!Arg.Flags.isByVal()) 2476 Chains.push_back(Val.getValue(1)); 2477 continue; 2478 } 2479 2480 assert(VA.isRegLoc() && "Parameter must be in a register!"); 2481 2482 Register Reg = VA.getLocReg(); 2483 const TargetRegisterClass *RC = nullptr; 2484 if (AMDGPU::VGPR_32RegClass.contains(Reg)) 2485 RC = &AMDGPU::VGPR_32RegClass; 2486 else if (AMDGPU::SGPR_32RegClass.contains(Reg)) 2487 RC = &AMDGPU::SGPR_32RegClass; 2488 else 2489 llvm_unreachable("Unexpected register class in LowerFormalArguments!"); 2490 EVT ValVT = VA.getValVT(); 2491 2492 Reg = MF.addLiveIn(Reg, RC); 2493 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); 2494 2495 if (Arg.Flags.isSRet()) { 2496 // The return object should be reasonably addressable. 2497 2498 // FIXME: This helps when the return is a real sret. If it is a 2499 // automatically inserted sret (i.e. CanLowerReturn returns false), an 2500 // extra copy is inserted in SelectionDAGBuilder which obscures this. 2501 unsigned NumBits 2502 = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex(); 2503 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, 2504 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits))); 2505 } 2506 2507 // If this is an 8 or 16-bit value, it is really passed promoted 2508 // to 32 bits. Insert an assert[sz]ext to capture this, then 2509 // truncate to the right size. 
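// For example, an i16 value arriving zero-extended in a 32-bit register gets
// an AssertZext recording the known-zero high bits, followed by a TRUNCATE
// back to i16.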
2510 switch (VA.getLocInfo()) { 2511 case CCValAssign::Full: 2512 break; 2513 case CCValAssign::BCvt: 2514 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val); 2515 break; 2516 case CCValAssign::SExt: 2517 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val, 2518 DAG.getValueType(ValVT)); 2519 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 2520 break; 2521 case CCValAssign::ZExt: 2522 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, 2523 DAG.getValueType(ValVT)); 2524 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 2525 break; 2526 case CCValAssign::AExt: 2527 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 2528 break; 2529 default: 2530 llvm_unreachable("Unknown loc info!"); 2531 } 2532 2533 InVals.push_back(Val); 2534 } 2535 2536 // Start adding system SGPRs. 2537 if (IsEntryFunc) { 2538 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsGraphics); 2539 } else { 2540 CCInfo.AllocateReg(Info->getScratchRSrcReg()); 2541 if (!IsGraphics) 2542 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); 2543 } 2544 2545 auto &ArgUsageInfo = 2546 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 2547 ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo()); 2548 2549 unsigned StackArgSize = CCInfo.getNextStackOffset(); 2550 Info->setBytesInStackArgArea(StackArgSize); 2551 2552 return Chains.empty() ? Chain : 2553 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 2554 } 2555 2556 // TODO: If return values can't fit in registers, we should return as many as 2557 // possible in registers before passing on stack. 2558 bool SITargetLowering::CanLowerReturn( 2559 CallingConv::ID CallConv, 2560 MachineFunction &MF, bool IsVarArg, 2561 const SmallVectorImpl<ISD::OutputArg> &Outs, 2562 LLVMContext &Context) const { 2563 // Replacing returns with sret/stack usage doesn't make sense for shaders. 2564 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn 2565 // for shaders. Vector types should be explicitly handled by CC. 2566 if (AMDGPU::isEntryFunctionCC(CallConv)) 2567 return true; 2568 2569 SmallVector<CCValAssign, 16> RVLocs; 2570 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 2571 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg)); 2572 } 2573 2574 SDValue 2575 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 2576 bool isVarArg, 2577 const SmallVectorImpl<ISD::OutputArg> &Outs, 2578 const SmallVectorImpl<SDValue> &OutVals, 2579 const SDLoc &DL, SelectionDAG &DAG) const { 2580 MachineFunction &MF = DAG.getMachineFunction(); 2581 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2582 2583 if (AMDGPU::isKernel(CallConv)) { 2584 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, 2585 OutVals, DL, DAG); 2586 } 2587 2588 bool IsShader = AMDGPU::isShader(CallConv); 2589 2590 Info->setIfReturnsVoid(Outs.empty()); 2591 bool IsWaveEnd = Info->returnsVoid() && IsShader; 2592 2593 // CCValAssign - represent the assignment of the return value to a location. 2594 SmallVector<CCValAssign, 48> RVLocs; 2595 SmallVector<ISD::OutputArg, 48> Splits; 2596 2597 // CCState - Info about the registers and stack slots. 2598 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 2599 *DAG.getContext()); 2600 2601 // Analyze outgoing return values. 
2602 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); 2603 2604 SDValue Flag; 2605 SmallVector<SDValue, 48> RetOps; 2606 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2607 2608 // Copy the result values into the output registers. 2609 for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E; 2610 ++I, ++RealRVLocIdx) { 2611 CCValAssign &VA = RVLocs[I]; 2612 assert(VA.isRegLoc() && "Can only return in registers!"); 2613 // TODO: Partially return in registers if return values don't fit. 2614 SDValue Arg = OutVals[RealRVLocIdx]; 2615 2616 // Copied from other backends. 2617 switch (VA.getLocInfo()) { 2618 case CCValAssign::Full: 2619 break; 2620 case CCValAssign::BCvt: 2621 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 2622 break; 2623 case CCValAssign::SExt: 2624 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 2625 break; 2626 case CCValAssign::ZExt: 2627 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 2628 break; 2629 case CCValAssign::AExt: 2630 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 2631 break; 2632 default: 2633 llvm_unreachable("Unknown loc info!"); 2634 } 2635 2636 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); 2637 Flag = Chain.getValue(1); 2638 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2639 } 2640 2641 // FIXME: Does sret work properly? 2642 if (!Info->isEntryFunction()) { 2643 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2644 const MCPhysReg *I = 2645 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 2646 if (I) { 2647 for (; *I; ++I) { 2648 if (AMDGPU::SReg_64RegClass.contains(*I)) 2649 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 2650 else if (AMDGPU::SReg_32RegClass.contains(*I)) 2651 RetOps.push_back(DAG.getRegister(*I, MVT::i32)); 2652 else 2653 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 2654 } 2655 } 2656 } 2657 2658 // Update chain and glue. 2659 RetOps[0] = Chain; 2660 if (Flag.getNode()) 2661 RetOps.push_back(Flag); 2662 2663 unsigned Opc = AMDGPUISD::ENDPGM; 2664 if (!IsWaveEnd) 2665 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG; 2666 return DAG.getNode(Opc, DL, MVT::Other, RetOps); 2667 } 2668 2669 SDValue SITargetLowering::LowerCallResult( 2670 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, 2671 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 2672 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn, 2673 SDValue ThisVal) const { 2674 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg); 2675 2676 // Assign locations to each value returned by this call. 2677 SmallVector<CCValAssign, 16> RVLocs; 2678 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 2679 *DAG.getContext()); 2680 CCInfo.AnalyzeCallResult(Ins, RetCC); 2681 2682 // Copy all of the result registers out of their specified physreg. 
2683 for (unsigned i = 0; i != RVLocs.size(); ++i) { 2684 CCValAssign VA = RVLocs[i]; 2685 SDValue Val; 2686 2687 if (VA.isRegLoc()) { 2688 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); 2689 Chain = Val.getValue(1); 2690 InFlag = Val.getValue(2); 2691 } else if (VA.isMemLoc()) { 2692 report_fatal_error("TODO: return values in memory"); 2693 } else 2694 llvm_unreachable("unknown argument location type"); 2695 2696 switch (VA.getLocInfo()) { 2697 case CCValAssign::Full: 2698 break; 2699 case CCValAssign::BCvt: 2700 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 2701 break; 2702 case CCValAssign::ZExt: 2703 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, 2704 DAG.getValueType(VA.getValVT())); 2705 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2706 break; 2707 case CCValAssign::SExt: 2708 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, 2709 DAG.getValueType(VA.getValVT())); 2710 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2711 break; 2712 case CCValAssign::AExt: 2713 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2714 break; 2715 default: 2716 llvm_unreachable("Unknown loc info!"); 2717 } 2718 2719 InVals.push_back(Val); 2720 } 2721 2722 return Chain; 2723 } 2724 2725 // Add code to pass special inputs required depending on used features separate 2726 // from the explicit user arguments present in the IR. 2727 void SITargetLowering::passSpecialInputs( 2728 CallLoweringInfo &CLI, 2729 CCState &CCInfo, 2730 const SIMachineFunctionInfo &Info, 2731 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 2732 SmallVectorImpl<SDValue> &MemOpChains, 2733 SDValue Chain) const { 2734 // If we don't have a call site, this was a call inserted by 2735 // legalization. These can never use special inputs. 2736 if (!CLI.CB) 2737 return; 2738 2739 SelectionDAG &DAG = CLI.DAG; 2740 const SDLoc &DL = CLI.DL; 2741 const Function &F = DAG.getMachineFunction().getFunction(); 2742 2743 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2744 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo(); 2745 2746 const AMDGPUFunctionArgInfo *CalleeArgInfo 2747 = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo; 2748 if (const Function *CalleeFunc = CLI.CB->getCalledFunction()) { 2749 auto &ArgUsageInfo = 2750 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 2751 CalleeArgInfo = &ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc); 2752 } 2753 2754 // TODO: Unify with private memory register handling. This is complicated by 2755 // the fact that at least in kernels, the input argument is not necessarily 2756 // in the same location as the input. 
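// Each table entry below pairs an implicit input with the amdgpu-no-*
// attribute that marks it unused at the call site; when the attribute is
// present, the copy for that input is skipped.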
2757 static constexpr std::pair<AMDGPUFunctionArgInfo::PreloadedValue, 2758 StringLiteral> ImplicitAttrs[] = { 2759 {AMDGPUFunctionArgInfo::DISPATCH_PTR, "amdgpu-no-dispatch-ptr"}, 2760 {AMDGPUFunctionArgInfo::QUEUE_PTR, "amdgpu-no-queue-ptr" }, 2761 {AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR, "amdgpu-no-implicitarg-ptr"}, 2762 {AMDGPUFunctionArgInfo::DISPATCH_ID, "amdgpu-no-dispatch-id"}, 2763 {AMDGPUFunctionArgInfo::WORKGROUP_ID_X, "amdgpu-no-workgroup-id-x"}, 2764 {AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,"amdgpu-no-workgroup-id-y"}, 2765 {AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,"amdgpu-no-workgroup-id-z"} 2766 }; 2767 2768 for (auto Attr : ImplicitAttrs) { 2769 const ArgDescriptor *OutgoingArg; 2770 const TargetRegisterClass *ArgRC; 2771 LLT ArgTy; 2772 2773 AMDGPUFunctionArgInfo::PreloadedValue InputID = Attr.first; 2774 2775 // If the callee does not use the attribute value, skip copying the value. 2776 if (CLI.CB->hasFnAttr(Attr.second)) 2777 continue; 2778 2779 std::tie(OutgoingArg, ArgRC, ArgTy) = 2780 CalleeArgInfo->getPreloadedValue(InputID); 2781 if (!OutgoingArg) 2782 continue; 2783 2784 const ArgDescriptor *IncomingArg; 2785 const TargetRegisterClass *IncomingArgRC; 2786 LLT Ty; 2787 std::tie(IncomingArg, IncomingArgRC, Ty) = 2788 CallerArgInfo.getPreloadedValue(InputID); 2789 assert(IncomingArgRC == ArgRC); 2790 2791 // All special arguments are ints for now. 2792 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32; 2793 SDValue InputReg; 2794 2795 if (IncomingArg) { 2796 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg); 2797 } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) { 2798 // The implicit arg ptr is special because it doesn't have a corresponding 2799 // input for kernels, and is computed from the kernarg segment pointer. 2800 InputReg = getImplicitArgPtr(DAG, DL); 2801 } else { 2802 // We may have proven the input wasn't needed, although the ABI is 2803 // requiring it. We just need to allocate the register appropriately. 2804 InputReg = DAG.getUNDEF(ArgVT); 2805 } 2806 2807 if (OutgoingArg->isRegister()) { 2808 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); 2809 if (!CCInfo.AllocateReg(OutgoingArg->getRegister())) 2810 report_fatal_error("failed to allocate implicit input argument"); 2811 } else { 2812 unsigned SpecialArgOffset = 2813 CCInfo.AllocateStack(ArgVT.getStoreSize(), Align(4)); 2814 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, 2815 SpecialArgOffset); 2816 MemOpChains.push_back(ArgStore); 2817 } 2818 } 2819 2820 // Pack workitem IDs into a single register or pass it as is if already 2821 // packed. 
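// The packed layout matches the entry-point convention: X occupies bits 0-9,
// Y bits 10-19 and Z bits 20-29 of a single 32-bit value, which is why the
// shifts below are 10 and 20.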
2822 const ArgDescriptor *OutgoingArg; 2823 const TargetRegisterClass *ArgRC; 2824 LLT Ty; 2825 2826 std::tie(OutgoingArg, ArgRC, Ty) = 2827 CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); 2828 if (!OutgoingArg) 2829 std::tie(OutgoingArg, ArgRC, Ty) = 2830 CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); 2831 if (!OutgoingArg) 2832 std::tie(OutgoingArg, ArgRC, Ty) = 2833 CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); 2834 if (!OutgoingArg) 2835 return; 2836 2837 const ArgDescriptor *IncomingArgX = std::get<0>( 2838 CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X)); 2839 const ArgDescriptor *IncomingArgY = std::get<0>( 2840 CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y)); 2841 const ArgDescriptor *IncomingArgZ = std::get<0>( 2842 CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z)); 2843 2844 SDValue InputReg; 2845 SDLoc SL; 2846 2847 const bool NeedWorkItemIDX = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-x"); 2848 const bool NeedWorkItemIDY = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-y"); 2849 const bool NeedWorkItemIDZ = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-z"); 2850 2851 // If incoming ids are not packed we need to pack them. 2852 if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX && 2853 NeedWorkItemIDX) { 2854 if (Subtarget->getMaxWorkitemID(F, 0) != 0) { 2855 InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX); 2856 } else { 2857 InputReg = DAG.getConstant(0, DL, MVT::i32); 2858 } 2859 } 2860 2861 if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY && 2862 NeedWorkItemIDY && Subtarget->getMaxWorkitemID(F, 1) != 0) { 2863 SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY); 2864 Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y, 2865 DAG.getShiftAmountConstant(10, MVT::i32, SL)); 2866 InputReg = InputReg.getNode() ? 2867 DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y; 2868 } 2869 2870 if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ && 2871 NeedWorkItemIDZ && Subtarget->getMaxWorkitemID(F, 2) != 0) { 2872 SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ); 2873 Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z, 2874 DAG.getShiftAmountConstant(20, MVT::i32, SL)); 2875 InputReg = InputReg.getNode() ? 2876 DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z; 2877 } 2878 2879 if (!InputReg && (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) { 2880 if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) { 2881 // We're in a situation where the outgoing function requires the workitem 2882 // ID, but the calling function does not have it (e.g a graphics function 2883 // calling a C calling convention function). This is illegal, but we need 2884 // to produce something. 2885 InputReg = DAG.getUNDEF(MVT::i32); 2886 } else { 2887 // Workitem ids are already packed, any of present incoming arguments 2888 // will carry all required fields. 2889 ArgDescriptor IncomingArg = ArgDescriptor::createArg( 2890 IncomingArgX ? *IncomingArgX : 2891 IncomingArgY ? 
*IncomingArgY : 2892 *IncomingArgZ, ~0u); 2893 InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg); 2894 } 2895 } 2896 2897 if (OutgoingArg->isRegister()) { 2898 if (InputReg) 2899 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); 2900 2901 CCInfo.AllocateReg(OutgoingArg->getRegister()); 2902 } else { 2903 unsigned SpecialArgOffset = CCInfo.AllocateStack(4, Align(4)); 2904 if (InputReg) { 2905 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, 2906 SpecialArgOffset); 2907 MemOpChains.push_back(ArgStore); 2908 } 2909 } 2910 } 2911 2912 static bool canGuaranteeTCO(CallingConv::ID CC) { 2913 return CC == CallingConv::Fast; 2914 } 2915 2916 /// Return true if we might ever do TCO for calls with this calling convention. 2917 static bool mayTailCallThisCC(CallingConv::ID CC) { 2918 switch (CC) { 2919 case CallingConv::C: 2920 case CallingConv::AMDGPU_Gfx: 2921 return true; 2922 default: 2923 return canGuaranteeTCO(CC); 2924 } 2925 } 2926 2927 bool SITargetLowering::isEligibleForTailCallOptimization( 2928 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg, 2929 const SmallVectorImpl<ISD::OutputArg> &Outs, 2930 const SmallVectorImpl<SDValue> &OutVals, 2931 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { 2932 if (!mayTailCallThisCC(CalleeCC)) 2933 return false; 2934 2935 // For a divergent call target, we need to do a waterfall loop over the 2936 // possible callees which precludes us from using a simple jump. 2937 if (Callee->isDivergent()) 2938 return false; 2939 2940 MachineFunction &MF = DAG.getMachineFunction(); 2941 const Function &CallerF = MF.getFunction(); 2942 CallingConv::ID CallerCC = CallerF.getCallingConv(); 2943 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2944 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 2945 2946 // Kernels aren't callable, and don't have a live in return address so it 2947 // doesn't make sense to do a tail call with entry functions. 2948 if (!CallerPreserved) 2949 return false; 2950 2951 bool CCMatch = CallerCC == CalleeCC; 2952 2953 if (DAG.getTarget().Options.GuaranteedTailCallOpt) { 2954 if (canGuaranteeTCO(CalleeCC) && CCMatch) 2955 return true; 2956 return false; 2957 } 2958 2959 // TODO: Can we handle var args? 2960 if (IsVarArg) 2961 return false; 2962 2963 for (const Argument &Arg : CallerF.args()) { 2964 if (Arg.hasByValAttr()) 2965 return false; 2966 } 2967 2968 LLVMContext &Ctx = *DAG.getContext(); 2969 2970 // Check that the call results are passed in the same way. 2971 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins, 2972 CCAssignFnForCall(CalleeCC, IsVarArg), 2973 CCAssignFnForCall(CallerCC, IsVarArg))) 2974 return false; 2975 2976 // The callee has to preserve all registers the caller needs to preserve. 2977 if (!CCMatch) { 2978 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 2979 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 2980 return false; 2981 } 2982 2983 // Nothing more to check if the callee is taking no arguments. 2984 if (Outs.empty()) 2985 return true; 2986 2987 SmallVector<CCValAssign, 16> ArgLocs; 2988 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx); 2989 2990 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg)); 2991 2992 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 2993 // If the stack arguments for this call do not fit into our own save area then 2994 // the call cannot be made tail. 
2995 // TODO: Is this really necessary? 2996 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) 2997 return false; 2998 2999 const MachineRegisterInfo &MRI = MF.getRegInfo(); 3000 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals); 3001 } 3002 3003 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 3004 if (!CI->isTailCall()) 3005 return false; 3006 3007 const Function *ParentFn = CI->getParent()->getParent(); 3008 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv())) 3009 return false; 3010 return true; 3011 } 3012 3013 // The wave scratch offset register is used as the global base pointer. 3014 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI, 3015 SmallVectorImpl<SDValue> &InVals) const { 3016 SelectionDAG &DAG = CLI.DAG; 3017 const SDLoc &DL = CLI.DL; 3018 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 3019 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 3020 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 3021 SDValue Chain = CLI.Chain; 3022 SDValue Callee = CLI.Callee; 3023 bool &IsTailCall = CLI.IsTailCall; 3024 CallingConv::ID CallConv = CLI.CallConv; 3025 bool IsVarArg = CLI.IsVarArg; 3026 bool IsSibCall = false; 3027 bool IsThisReturn = false; 3028 MachineFunction &MF = DAG.getMachineFunction(); 3029 3030 if (Callee.isUndef() || isNullConstant(Callee)) { 3031 if (!CLI.IsTailCall) { 3032 for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I) 3033 InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT)); 3034 } 3035 3036 return Chain; 3037 } 3038 3039 if (IsVarArg) { 3040 return lowerUnhandledCall(CLI, InVals, 3041 "unsupported call to variadic function "); 3042 } 3043 3044 if (!CLI.CB) 3045 report_fatal_error("unsupported libcall legalization"); 3046 3047 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) { 3048 return lowerUnhandledCall(CLI, InVals, 3049 "unsupported required tail call to function "); 3050 } 3051 3052 if (AMDGPU::isShader(CallConv)) { 3053 // Note the issue is with the CC of the called function, not of the call 3054 // itself. 3055 return lowerUnhandledCall(CLI, InVals, 3056 "unsupported call to a shader function "); 3057 } 3058 3059 if (AMDGPU::isShader(MF.getFunction().getCallingConv()) && 3060 CallConv != CallingConv::AMDGPU_Gfx) { 3061 // Only allow calls with specific calling conventions. 3062 return lowerUnhandledCall(CLI, InVals, 3063 "unsupported calling convention for call from " 3064 "graphics shader of function "); 3065 } 3066 3067 if (IsTailCall) { 3068 IsTailCall = isEligibleForTailCallOptimization( 3069 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); 3070 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall()) { 3071 report_fatal_error("failed to perform tail call elimination on a call " 3072 "site marked musttail"); 3073 } 3074 3075 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; 3076 3077 // A sibling call is one where we're under the usual C ABI and not planning 3078 // to change that but can still do a tail call: 3079 if (!TailCallOpt && IsTailCall) 3080 IsSibCall = true; 3081 3082 if (IsTailCall) 3083 ++NumTailCalls; 3084 } 3085 3086 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 3087 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 3088 SmallVector<SDValue, 8> MemOpChains; 3089 3090 // Analyze operands of the call, assigning locations to each operand. 
3091 SmallVector<CCValAssign, 16> ArgLocs; 3092 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 3093 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg); 3094 3095 if (CallConv != CallingConv::AMDGPU_Gfx) { 3096 // With a fixed ABI, allocate fixed registers before user arguments. 3097 passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain); 3098 } 3099 3100 CCInfo.AnalyzeCallOperands(Outs, AssignFn); 3101 3102 // Get a count of how many bytes are to be pushed on the stack. 3103 unsigned NumBytes = CCInfo.getNextStackOffset(); 3104 3105 if (IsSibCall) { 3106 // Since we're not changing the ABI to make this a tail call, the memory 3107 // operands are already available in the caller's incoming argument space. 3108 NumBytes = 0; 3109 } 3110 3111 // FPDiff is the byte offset of the call's argument area from the callee's. 3112 // Stores to callee stack arguments will be placed in FixedStackSlots offset 3113 // by this amount for a tail call. In a sibling call it must be 0 because the 3114 // caller will deallocate the entire stack and the callee still expects its 3115 // arguments to begin at SP+0. Completely unused for non-tail calls. 3116 int32_t FPDiff = 0; 3117 MachineFrameInfo &MFI = MF.getFrameInfo(); 3118 3119 // Adjust the stack pointer for the new arguments... 3120 // These operations are automatically eliminated by the prolog/epilog pass 3121 if (!IsSibCall) { 3122 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); 3123 3124 if (!Subtarget->enableFlatScratch()) { 3125 SmallVector<SDValue, 4> CopyFromChains; 3126 3127 // In the HSA case, this should be an identity copy. 3128 SDValue ScratchRSrcReg 3129 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); 3130 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); 3131 CopyFromChains.push_back(ScratchRSrcReg.getValue(1)); 3132 Chain = DAG.getTokenFactor(DL, CopyFromChains); 3133 } 3134 } 3135 3136 MVT PtrVT = MVT::i32; 3137 3138 // Walk the register/memloc assignments, inserting copies/loads. 3139 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3140 CCValAssign &VA = ArgLocs[i]; 3141 SDValue Arg = OutVals[i]; 3142 3143 // Promote the value if needed. 3144 switch (VA.getLocInfo()) { 3145 case CCValAssign::Full: 3146 break; 3147 case CCValAssign::BCvt: 3148 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 3149 break; 3150 case CCValAssign::ZExt: 3151 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 3152 break; 3153 case CCValAssign::SExt: 3154 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 3155 break; 3156 case CCValAssign::AExt: 3157 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 3158 break; 3159 case CCValAssign::FPExt: 3160 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); 3161 break; 3162 default: 3163 llvm_unreachable("Unknown loc info!"); 3164 } 3165 3166 if (VA.isRegLoc()) { 3167 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 3168 } else { 3169 assert(VA.isMemLoc()); 3170 3171 SDValue DstAddr; 3172 MachinePointerInfo DstInfo; 3173 3174 unsigned LocMemOffset = VA.getLocMemOffset(); 3175 int32_t Offset = LocMemOffset; 3176 3177 SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT); 3178 MaybeAlign Alignment; 3179 3180 if (IsTailCall) { 3181 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3182 unsigned OpSize = Flags.isByVal() ? 3183 Flags.getByValSize() : VA.getValVT().getStoreSize(); 3184 3185 // FIXME: We can have better than the minimum byval required alignment. 
3186 Alignment = 3187 Flags.isByVal() 3188 ? Flags.getNonZeroByValAlign() 3189 : commonAlignment(Subtarget->getStackAlignment(), Offset); 3190 3191 Offset = Offset + FPDiff; 3192 int FI = MFI.CreateFixedObject(OpSize, Offset, true); 3193 3194 DstAddr = DAG.getFrameIndex(FI, PtrVT); 3195 DstInfo = MachinePointerInfo::getFixedStack(MF, FI); 3196 3197 // Make sure any stack arguments overlapping with where we're storing 3198 // are loaded before this eventual operation. Otherwise they'll be 3199 // clobbered. 3200 3201 // FIXME: Why is this really necessary? This seems to just result in a 3202 // lot of code to copy the stack and write them back to the same 3203 // locations, which are supposed to be immutable? 3204 Chain = addTokenForArgument(Chain, DAG, MFI, FI); 3205 } else { 3206 // Stores to the argument stack area are relative to the stack pointer. 3207 SDValue SP = DAG.getCopyFromReg(Chain, DL, Info->getStackPtrOffsetReg(), 3208 MVT::i32); 3209 DstAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, SP, PtrOff); 3210 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); 3211 Alignment = 3212 commonAlignment(Subtarget->getStackAlignment(), LocMemOffset); 3213 } 3214 3215 if (Outs[i].Flags.isByVal()) { 3216 SDValue SizeNode = 3217 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); 3218 SDValue Cpy = 3219 DAG.getMemcpy(Chain, DL, DstAddr, Arg, SizeNode, 3220 Outs[i].Flags.getNonZeroByValAlign(), 3221 /*isVol = */ false, /*AlwaysInline = */ true, 3222 /*isTailCall = */ false, DstInfo, 3223 MachinePointerInfo(AMDGPUAS::PRIVATE_ADDRESS)); 3224 3225 MemOpChains.push_back(Cpy); 3226 } else { 3227 SDValue Store = 3228 DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Alignment); 3229 MemOpChains.push_back(Store); 3230 } 3231 } 3232 } 3233 3234 if (!MemOpChains.empty()) 3235 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 3236 3237 // Build a sequence of copy-to-reg nodes chained together with token chain 3238 // and flag operands which copy the outgoing args into the appropriate regs. 3239 SDValue InFlag; 3240 for (auto &RegToPass : RegsToPass) { 3241 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, 3242 RegToPass.second, InFlag); 3243 InFlag = Chain.getValue(1); 3244 } 3245 3246 3247 // We don't usually want to end the call-sequence here because we would tidy 3248 // the frame up *after* the call, however in the ABI-changing tail-call case 3249 // we've carefully laid out the parameters so that when sp is reset they'll be 3250 // in the correct location. 3251 if (IsTailCall && !IsSibCall) { 3252 Chain = DAG.getCALLSEQ_END(Chain, 3253 DAG.getTargetConstant(NumBytes, DL, MVT::i32), 3254 DAG.getTargetConstant(0, DL, MVT::i32), 3255 InFlag, DL); 3256 InFlag = Chain.getValue(1); 3257 } 3258 3259 std::vector<SDValue> Ops; 3260 Ops.push_back(Chain); 3261 Ops.push_back(Callee); 3262 // Add a redundant copy of the callee global which will not be legalized, as 3263 // we need direct access to the callee later. 3264 if (GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(Callee)) { 3265 const GlobalValue *GV = GSD->getGlobal(); 3266 Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64)); 3267 } else { 3268 Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64)); 3269 } 3270 3271 if (IsTailCall) { 3272 // Each tail call may have to adjust the stack by a different amount, so 3273 // this information must travel along with the operation for eventual 3274 // consumption by emitEpilogue. 
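// For a sibling call FPDiff stays 0, so the epilogue leaves the stack pointer
// where the caller set it.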
3275 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
3276 }
3277 
3278 // Add argument registers to the end of the list so that they are known live
3279 // into the call.
3280 for (auto &RegToPass : RegsToPass) {
3281 Ops.push_back(DAG.getRegister(RegToPass.first,
3282 RegToPass.second.getValueType()));
3283 }
3284 
3285 // Add a register mask operand representing the call-preserved registers.
3286 
3287 auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
3288 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
3289 assert(Mask && "Missing call preserved mask for calling convention");
3290 Ops.push_back(DAG.getRegisterMask(Mask));
3291 
3292 if (InFlag.getNode())
3293 Ops.push_back(InFlag);
3294 
3295 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3296 
3297 // If we're doing a tail call, use a TC_RETURN here rather than an
3298 // actual call instruction.
3299 if (IsTailCall) {
3300 MFI.setHasTailCall();
3301 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
3302 }
3303 
3304 // Returns a chain and a flag for retval copy to use.
3305 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
3306 Chain = Call.getValue(0);
3307 InFlag = Call.getValue(1);
3308 
3309 uint64_t CalleePopBytes = NumBytes;
3310 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
3311 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
3312 InFlag, DL);
3313 if (!Ins.empty())
3314 InFlag = Chain.getValue(1);
3315 
3316 // Handle result values, copying them out of physregs into vregs that we
3317 // return.
3318 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
3319 InVals, IsThisReturn,
3320 IsThisReturn ? OutVals[0] : SDValue());
3321 }
3322 
3323 // This is identical to the default implementation in ExpandDYNAMIC_STACKALLOC,
3324 // except for applying the wave size scale to the increment amount.
3325 SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(
3326 SDValue Op, SelectionDAG &DAG) const {
3327 const MachineFunction &MF = DAG.getMachineFunction();
3328 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3329 
3330 SDLoc dl(Op);
3331 EVT VT = Op.getValueType();
3332 SDValue Tmp1 = Op;
3333 SDValue Tmp2 = Op.getValue(1);
3334 SDValue Tmp3 = Op.getOperand(2);
3335 SDValue Chain = Tmp1.getOperand(0);
3336 
3337 Register SPReg = Info->getStackPtrOffsetReg();
3338 
3339 // Chain the dynamic stack allocation so that it doesn't modify the stack
3340 // pointer when other instructions are using the stack.
3341 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
3342 
3343 SDValue Size = Tmp2.getOperand(1);
3344 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
3345 Chain = SP.getValue(1);
3346 MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue();
3347 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
3348 const TargetFrameLowering *TFL = ST.getFrameLowering();
3349 unsigned Opc =
3350 TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ?
3351 ISD::ADD : ISD::SUB; 3352 3353 SDValue ScaledSize = DAG.getNode( 3354 ISD::SHL, dl, VT, Size, 3355 DAG.getConstant(ST.getWavefrontSizeLog2(), dl, MVT::i32)); 3356 3357 Align StackAlign = TFL->getStackAlign(); 3358 Tmp1 = DAG.getNode(Opc, dl, VT, SP, ScaledSize); // Value 3359 if (Alignment && *Alignment > StackAlign) { 3360 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1, 3361 DAG.getConstant(-(uint64_t)Alignment->value() 3362 << ST.getWavefrontSizeLog2(), 3363 dl, VT)); 3364 } 3365 3366 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain 3367 Tmp2 = DAG.getCALLSEQ_END( 3368 Chain, DAG.getIntPtrConstant(0, dl, true), 3369 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl); 3370 3371 return DAG.getMergeValues({Tmp1, Tmp2}, dl); 3372 } 3373 3374 SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 3375 SelectionDAG &DAG) const { 3376 // We only handle constant sizes here to allow non-entry block, static sized 3377 // allocas. A truly dynamic value is more difficult to support because we 3378 // don't know if the size value is uniform or not. If the size isn't uniform, 3379 // we would need to do a wave reduction to get the maximum size to know how 3380 // much to increment the uniform stack pointer. 3381 SDValue Size = Op.getOperand(1); 3382 if (isa<ConstantSDNode>(Size)) 3383 return lowerDYNAMIC_STACKALLOCImpl(Op, DAG); // Use "generic" expansion. 3384 3385 return AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(Op, DAG); 3386 } 3387 3388 Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT, 3389 const MachineFunction &MF) const { 3390 Register Reg = StringSwitch<Register>(RegName) 3391 .Case("m0", AMDGPU::M0) 3392 .Case("exec", AMDGPU::EXEC) 3393 .Case("exec_lo", AMDGPU::EXEC_LO) 3394 .Case("exec_hi", AMDGPU::EXEC_HI) 3395 .Case("flat_scratch", AMDGPU::FLAT_SCR) 3396 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) 3397 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) 3398 .Default(Register()); 3399 3400 if (Reg == AMDGPU::NoRegister) { 3401 report_fatal_error(Twine("invalid register name \"" 3402 + StringRef(RegName) + "\".")); 3403 3404 } 3405 3406 if (!Subtarget->hasFlatScrRegister() && 3407 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { 3408 report_fatal_error(Twine("invalid register \"" 3409 + StringRef(RegName) + "\" for subtarget.")); 3410 } 3411 3412 switch (Reg) { 3413 case AMDGPU::M0: 3414 case AMDGPU::EXEC_LO: 3415 case AMDGPU::EXEC_HI: 3416 case AMDGPU::FLAT_SCR_LO: 3417 case AMDGPU::FLAT_SCR_HI: 3418 if (VT.getSizeInBits() == 32) 3419 return Reg; 3420 break; 3421 case AMDGPU::EXEC: 3422 case AMDGPU::FLAT_SCR: 3423 if (VT.getSizeInBits() == 64) 3424 return Reg; 3425 break; 3426 default: 3427 llvm_unreachable("missing register type checking"); 3428 } 3429 3430 report_fatal_error(Twine("invalid type for register \"" 3431 + StringRef(RegName) + "\".")); 3432 } 3433 3434 // If kill is not the last instruction, split the block so kill is always a 3435 // proper terminator. 3436 MachineBasicBlock * 3437 SITargetLowering::splitKillBlock(MachineInstr &MI, 3438 MachineBasicBlock *BB) const { 3439 MachineBasicBlock *SplitBB = BB->splitAt(MI, false /*UpdateLiveIns*/); 3440 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3441 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 3442 return SplitBB; 3443 } 3444 3445 // Split block \p MBB at \p MI, as to insert a loop. If \p InstInLoop is true, 3446 // \p MI will be the only instruction in the loop body block. 
Otherwise, it will 3447 // be the first instruction in the remainder block. 3448 // 3449 /// \returns { LoopBody, Remainder } 3450 static std::pair<MachineBasicBlock *, MachineBasicBlock *> 3451 splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) { 3452 MachineFunction *MF = MBB.getParent(); 3453 MachineBasicBlock::iterator I(&MI); 3454 3455 // To insert the loop we need to split the block. Move everything after this 3456 // point to a new block, and insert a new empty block between the two. 3457 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); 3458 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); 3459 MachineFunction::iterator MBBI(MBB); 3460 ++MBBI; 3461 3462 MF->insert(MBBI, LoopBB); 3463 MF->insert(MBBI, RemainderBB); 3464 3465 LoopBB->addSuccessor(LoopBB); 3466 LoopBB->addSuccessor(RemainderBB); 3467 3468 // Move the rest of the block into a new block. 3469 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 3470 3471 if (InstInLoop) { 3472 auto Next = std::next(I); 3473 3474 // Move instruction to loop body. 3475 LoopBB->splice(LoopBB->begin(), &MBB, I, Next); 3476 3477 // Move the rest of the block. 3478 RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end()); 3479 } else { 3480 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 3481 } 3482 3483 MBB.addSuccessor(LoopBB); 3484 3485 return std::make_pair(LoopBB, RemainderBB); 3486 } 3487 3488 /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it. 3489 void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const { 3490 MachineBasicBlock *MBB = MI.getParent(); 3491 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3492 auto I = MI.getIterator(); 3493 auto E = std::next(I); 3494 3495 BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT)) 3496 .addImm(0); 3497 3498 MIBundleBuilder Bundler(*MBB, I, E); 3499 finalizeBundle(*MBB, Bundler.begin()); 3500 } 3501 3502 MachineBasicBlock * 3503 SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI, 3504 MachineBasicBlock *BB) const { 3505 const DebugLoc &DL = MI.getDebugLoc(); 3506 3507 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3508 3509 MachineBasicBlock *LoopBB; 3510 MachineBasicBlock *RemainderBB; 3511 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3512 3513 // Apparently kill flags are only valid if the def is in the same block? 3514 if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0)) 3515 Src->setIsKill(false); 3516 3517 std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true); 3518 3519 MachineBasicBlock::iterator I = LoopBB->end(); 3520 3521 const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg( 3522 AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1); 3523 3524 // Clear TRAP_STS.MEM_VIOL 3525 BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32)) 3526 .addImm(0) 3527 .addImm(EncodedReg); 3528 3529 bundleInstWithWaitcnt(MI); 3530 3531 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3532 3533 // Load and check TRAP_STS.MEM_VIOL 3534 BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg) 3535 .addImm(EncodedReg); 3536 3537 // FIXME: Do we need to use an isel pseudo that may clobber scc? 
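// Branch back and re-run the bundled GWS operation while the re-read
// TRAP_STS.MEM_VIOL bit is still set.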
3538 BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32)) 3539 .addReg(Reg, RegState::Kill) 3540 .addImm(0); 3541 BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) 3542 .addMBB(LoopBB); 3543 3544 return RemainderBB; 3545 } 3546 3547 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the 3548 // wavefront. If the value is uniform and just happens to be in a VGPR, this 3549 // will only do one iteration. In the worst case, this will loop 64 times. 3550 // 3551 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. 3552 static MachineBasicBlock::iterator 3553 emitLoadM0FromVGPRLoop(const SIInstrInfo *TII, MachineRegisterInfo &MRI, 3554 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 3555 const DebugLoc &DL, const MachineOperand &Idx, 3556 unsigned InitReg, unsigned ResultReg, unsigned PhiReg, 3557 unsigned InitSaveExecReg, int Offset, bool UseGPRIdxMode, 3558 Register &SGPRIdxReg) { 3559 3560 MachineFunction *MF = OrigBB.getParent(); 3561 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3562 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 3563 MachineBasicBlock::iterator I = LoopBB.begin(); 3564 3565 const TargetRegisterClass *BoolRC = TRI->getBoolRC(); 3566 Register PhiExec = MRI.createVirtualRegister(BoolRC); 3567 Register NewExec = MRI.createVirtualRegister(BoolRC); 3568 Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3569 Register CondReg = MRI.createVirtualRegister(BoolRC); 3570 3571 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) 3572 .addReg(InitReg) 3573 .addMBB(&OrigBB) 3574 .addReg(ResultReg) 3575 .addMBB(&LoopBB); 3576 3577 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) 3578 .addReg(InitSaveExecReg) 3579 .addMBB(&OrigBB) 3580 .addReg(NewExec) 3581 .addMBB(&LoopBB); 3582 3583 // Read the next variant <- also loop target. 3584 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) 3585 .addReg(Idx.getReg(), getUndefRegState(Idx.isUndef())); 3586 3587 // Compare the just read M0 value to all possible Idx values. 3588 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) 3589 .addReg(CurrentIdxReg) 3590 .addReg(Idx.getReg(), 0, Idx.getSubReg()); 3591 3592 // Update EXEC, save the original EXEC value to VCC. 3593 BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 3594 : AMDGPU::S_AND_SAVEEXEC_B64), 3595 NewExec) 3596 .addReg(CondReg, RegState::Kill); 3597 3598 MRI.setSimpleHint(NewExec, CondReg); 3599 3600 if (UseGPRIdxMode) { 3601 if (Offset == 0) { 3602 SGPRIdxReg = CurrentIdxReg; 3603 } else { 3604 SGPRIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3605 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), SGPRIdxReg) 3606 .addReg(CurrentIdxReg, RegState::Kill) 3607 .addImm(Offset); 3608 } 3609 } else { 3610 // Move index from VCC into M0 3611 if (Offset == 0) { 3612 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 3613 .addReg(CurrentIdxReg, RegState::Kill); 3614 } else { 3615 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 3616 .addReg(CurrentIdxReg, RegState::Kill) 3617 .addImm(Offset); 3618 } 3619 } 3620 3621 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 3622 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 3623 MachineInstr *InsertPt = 3624 BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? 
AMDGPU::S_XOR_B32_term
3625 : AMDGPU::S_XOR_B64_term), Exec)
3626 .addReg(Exec)
3627 .addReg(NewExec)
3628 
3629 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
3630 // s_cbranch_scc0?
3631 
3632 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
3633 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
3634 .addMBB(&LoopBB);
3635 
3636 return InsertPt->getIterator();
3637 }
3638 
3639 // This has slightly sub-optimal regalloc when the source vector is killed by
3640 // the read. The register allocator does not understand that the kill is
3641 // per-workitem, so the source is kept alive for the whole loop and we end up
3642 // not re-using a subregister from it, using one more VGPR than necessary. This
3643 // extra VGPR was avoided when this was expanded after register allocation.
3644 static MachineBasicBlock::iterator
3645 loadM0FromVGPR(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineInstr &MI,
3646 unsigned InitResultReg, unsigned PhiReg, int Offset,
3647 bool UseGPRIdxMode, Register &SGPRIdxReg) {
3648 MachineFunction *MF = MBB.getParent();
3649 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3650 const SIRegisterInfo *TRI = ST.getRegisterInfo();
3651 MachineRegisterInfo &MRI = MF->getRegInfo();
3652 const DebugLoc &DL = MI.getDebugLoc();
3653 MachineBasicBlock::iterator I(&MI);
3654 
3655 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3656 Register DstReg = MI.getOperand(0).getReg();
3657 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
3658 Register TmpExec = MRI.createVirtualRegister(BoolXExecRC);
3659 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3660 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
3661 
3662 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3663 
3664 // Save the EXEC mask
3665 BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
3666 .addReg(Exec);
3667 
3668 MachineBasicBlock *LoopBB;
3669 MachineBasicBlock *RemainderBB;
3670 std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false);
3671 
3672 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3673 
3674 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3675 InitResultReg, DstReg, PhiReg, TmpExec,
3676 Offset, UseGPRIdxMode, SGPRIdxReg);
3677 
3678 MachineBasicBlock* LandingPad = MF->CreateMachineBasicBlock();
3679 MachineFunction::iterator MBBI(LoopBB);
3680 ++MBBI;
3681 MF->insert(MBBI, LandingPad);
3682 LoopBB->removeSuccessor(RemainderBB);
3683 LandingPad->addSuccessor(RemainderBB);
3684 LoopBB->addSuccessor(LandingPad);
3685 MachineBasicBlock::iterator First = LandingPad->begin();
3686 BuildMI(*LandingPad, First, DL, TII->get(MovExecOpc), Exec)
3687 .addReg(SaveExec);
3688 
3689 return InsPt;
3690 }
3691 
3692 // Returns subreg index, offset
3693 static std::pair<unsigned, int>
3694 computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3695 const TargetRegisterClass *SuperRC,
3696 unsigned VecReg,
3697 int Offset) {
3698 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
3699 
3700 // Skip out of bounds offsets, or else we would end up using an undefined
3701 // register.
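// An out-of-range constant offset is returned unchanged so it is added to the
// index at run time rather than folded into a static subregister index.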
3702 if (Offset >= NumElts || Offset < 0) 3703 return std::make_pair(AMDGPU::sub0, Offset); 3704 3705 return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0); 3706 } 3707 3708 static void setM0ToIndexFromSGPR(const SIInstrInfo *TII, 3709 MachineRegisterInfo &MRI, MachineInstr &MI, 3710 int Offset) { 3711 MachineBasicBlock *MBB = MI.getParent(); 3712 const DebugLoc &DL = MI.getDebugLoc(); 3713 MachineBasicBlock::iterator I(&MI); 3714 3715 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3716 3717 assert(Idx->getReg() != AMDGPU::NoRegister); 3718 3719 if (Offset == 0) { 3720 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx); 3721 } else { 3722 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 3723 .add(*Idx) 3724 .addImm(Offset); 3725 } 3726 } 3727 3728 static Register getIndirectSGPRIdx(const SIInstrInfo *TII, 3729 MachineRegisterInfo &MRI, MachineInstr &MI, 3730 int Offset) { 3731 MachineBasicBlock *MBB = MI.getParent(); 3732 const DebugLoc &DL = MI.getDebugLoc(); 3733 MachineBasicBlock::iterator I(&MI); 3734 3735 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3736 3737 if (Offset == 0) 3738 return Idx->getReg(); 3739 3740 Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3741 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) 3742 .add(*Idx) 3743 .addImm(Offset); 3744 return Tmp; 3745 } 3746 3747 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, 3748 MachineBasicBlock &MBB, 3749 const GCNSubtarget &ST) { 3750 const SIInstrInfo *TII = ST.getInstrInfo(); 3751 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 3752 MachineFunction *MF = MBB.getParent(); 3753 MachineRegisterInfo &MRI = MF->getRegInfo(); 3754 3755 Register Dst = MI.getOperand(0).getReg(); 3756 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3757 Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); 3758 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 3759 3760 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); 3761 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); 3762 3763 unsigned SubReg; 3764 std::tie(SubReg, Offset) 3765 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); 3766 3767 const bool UseGPRIdxMode = ST.useVGPRIndexMode(); 3768 3769 // Check for a SGPR index. 3770 if (TII->getRegisterInfo().isSGPRClass(IdxRC)) { 3771 MachineBasicBlock::iterator I(&MI); 3772 const DebugLoc &DL = MI.getDebugLoc(); 3773 3774 if (UseGPRIdxMode) { 3775 // TODO: Look at the uses to avoid the copy. This may require rescheduling 3776 // to avoid interfering with other uses, so probably requires a new 3777 // optimization pass. 3778 Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset); 3779 3780 const MCInstrDesc &GPRIDXDesc = 3781 TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true); 3782 BuildMI(MBB, I, DL, GPRIDXDesc, Dst) 3783 .addReg(SrcReg) 3784 .addReg(Idx) 3785 .addImm(SubReg); 3786 } else { 3787 setM0ToIndexFromSGPR(TII, MRI, MI, Offset); 3788 3789 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 3790 .addReg(SrcReg, 0, SubReg) 3791 .addReg(SrcReg, RegState::Implicit); 3792 } 3793 3794 MI.eraseFromParent(); 3795 3796 return &MBB; 3797 } 3798 3799 // Control flow needs to be inserted if indexing with a VGPR. 
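// loadM0FromVGPR emits a waterfall loop: each iteration reads one index value
// with v_readfirstlane, restricts EXEC to the lanes using that index, and
// performs the read for those lanes.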
3800 const DebugLoc &DL = MI.getDebugLoc(); 3801 MachineBasicBlock::iterator I(&MI); 3802 3803 Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3804 Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3805 3806 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); 3807 3808 Register SGPRIdxReg; 3809 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, 3810 UseGPRIdxMode, SGPRIdxReg); 3811 3812 MachineBasicBlock *LoopBB = InsPt->getParent(); 3813 3814 if (UseGPRIdxMode) { 3815 const MCInstrDesc &GPRIDXDesc = 3816 TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true); 3817 3818 BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst) 3819 .addReg(SrcReg) 3820 .addReg(SGPRIdxReg) 3821 .addImm(SubReg); 3822 } else { 3823 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 3824 .addReg(SrcReg, 0, SubReg) 3825 .addReg(SrcReg, RegState::Implicit); 3826 } 3827 3828 MI.eraseFromParent(); 3829 3830 return LoopBB; 3831 } 3832 3833 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, 3834 MachineBasicBlock &MBB, 3835 const GCNSubtarget &ST) { 3836 const SIInstrInfo *TII = ST.getInstrInfo(); 3837 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 3838 MachineFunction *MF = MBB.getParent(); 3839 MachineRegisterInfo &MRI = MF->getRegInfo(); 3840 3841 Register Dst = MI.getOperand(0).getReg(); 3842 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); 3843 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3844 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); 3845 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 3846 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); 3847 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); 3848 3849 // This can be an immediate, but will be folded later. 3850 assert(Val->getReg()); 3851 3852 unsigned SubReg; 3853 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, 3854 SrcVec->getReg(), 3855 Offset); 3856 const bool UseGPRIdxMode = ST.useVGPRIndexMode(); 3857 3858 if (Idx->getReg() == AMDGPU::NoRegister) { 3859 MachineBasicBlock::iterator I(&MI); 3860 const DebugLoc &DL = MI.getDebugLoc(); 3861 3862 assert(Offset == 0); 3863 3864 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) 3865 .add(*SrcVec) 3866 .add(*Val) 3867 .addImm(SubReg); 3868 3869 MI.eraseFromParent(); 3870 return &MBB; 3871 } 3872 3873 // Check for a SGPR index. 3874 if (TII->getRegisterInfo().isSGPRClass(IdxRC)) { 3875 MachineBasicBlock::iterator I(&MI); 3876 const DebugLoc &DL = MI.getDebugLoc(); 3877 3878 if (UseGPRIdxMode) { 3879 Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset); 3880 3881 const MCInstrDesc &GPRIDXDesc = 3882 TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); 3883 BuildMI(MBB, I, DL, GPRIDXDesc, Dst) 3884 .addReg(SrcVec->getReg()) 3885 .add(*Val) 3886 .addReg(Idx) 3887 .addImm(SubReg); 3888 } else { 3889 setM0ToIndexFromSGPR(TII, MRI, MI, Offset); 3890 3891 const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo( 3892 TRI.getRegSizeInBits(*VecRC), 32, false); 3893 BuildMI(MBB, I, DL, MovRelDesc, Dst) 3894 .addReg(SrcVec->getReg()) 3895 .add(*Val) 3896 .addImm(SubReg); 3897 } 3898 MI.eraseFromParent(); 3899 return &MBB; 3900 } 3901 3902 // Control flow needs to be inserted if indexing with a VGPR. 
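// As in emitIndirectSrc, a waterfall loop handles the divergent index. The
// vector being updated is carried through the loop in PhiReg so each iteration
// only updates the lanes whose index matched.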
3903 if (Val->isReg()) 3904 MRI.clearKillFlags(Val->getReg()); 3905 3906 const DebugLoc &DL = MI.getDebugLoc(); 3907 3908 Register PhiReg = MRI.createVirtualRegister(VecRC); 3909 3910 Register SGPRIdxReg; 3911 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, Offset, 3912 UseGPRIdxMode, SGPRIdxReg); 3913 MachineBasicBlock *LoopBB = InsPt->getParent(); 3914 3915 if (UseGPRIdxMode) { 3916 const MCInstrDesc &GPRIDXDesc = 3917 TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); 3918 3919 BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst) 3920 .addReg(PhiReg) 3921 .add(*Val) 3922 .addReg(SGPRIdxReg) 3923 .addImm(AMDGPU::sub0); 3924 } else { 3925 const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo( 3926 TRI.getRegSizeInBits(*VecRC), 32, false); 3927 BuildMI(*LoopBB, InsPt, DL, MovRelDesc, Dst) 3928 .addReg(PhiReg) 3929 .add(*Val) 3930 .addImm(AMDGPU::sub0); 3931 } 3932 3933 MI.eraseFromParent(); 3934 return LoopBB; 3935 } 3936 3937 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( 3938 MachineInstr &MI, MachineBasicBlock *BB) const { 3939 3940 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3941 MachineFunction *MF = BB->getParent(); 3942 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 3943 3944 switch (MI.getOpcode()) { 3945 case AMDGPU::S_UADDO_PSEUDO: 3946 case AMDGPU::S_USUBO_PSEUDO: { 3947 const DebugLoc &DL = MI.getDebugLoc(); 3948 MachineOperand &Dest0 = MI.getOperand(0); 3949 MachineOperand &Dest1 = MI.getOperand(1); 3950 MachineOperand &Src0 = MI.getOperand(2); 3951 MachineOperand &Src1 = MI.getOperand(3); 3952 3953 unsigned Opc = (MI.getOpcode() == AMDGPU::S_UADDO_PSEUDO) 3954 ? AMDGPU::S_ADD_I32 3955 : AMDGPU::S_SUB_I32; 3956 BuildMI(*BB, MI, DL, TII->get(Opc), Dest0.getReg()).add(Src0).add(Src1); 3957 3958 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CSELECT_B64), Dest1.getReg()) 3959 .addImm(1) 3960 .addImm(0); 3961 3962 MI.eraseFromParent(); 3963 return BB; 3964 } 3965 case AMDGPU::S_ADD_U64_PSEUDO: 3966 case AMDGPU::S_SUB_U64_PSEUDO: { 3967 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3968 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3969 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 3970 const TargetRegisterClass *BoolRC = TRI->getBoolRC(); 3971 const DebugLoc &DL = MI.getDebugLoc(); 3972 3973 MachineOperand &Dest = MI.getOperand(0); 3974 MachineOperand &Src0 = MI.getOperand(1); 3975 MachineOperand &Src1 = MI.getOperand(2); 3976 3977 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 3978 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 3979 3980 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm( 3981 MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); 3982 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm( 3983 MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); 3984 3985 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm( 3986 MI, MRI, Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); 3987 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm( 3988 MI, MRI, Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); 3989 3990 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 3991 3992 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; 3993 unsigned HiOpc = IsAdd ? 
AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; 3994 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0).add(Src0Sub0).add(Src1Sub0); 3995 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1).add(Src0Sub1).add(Src1Sub1); 3996 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) 3997 .addReg(DestSub0) 3998 .addImm(AMDGPU::sub0) 3999 .addReg(DestSub1) 4000 .addImm(AMDGPU::sub1); 4001 MI.eraseFromParent(); 4002 return BB; 4003 } 4004 case AMDGPU::V_ADD_U64_PSEUDO: 4005 case AMDGPU::V_SUB_U64_PSEUDO: { 4006 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 4007 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 4008 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4009 const DebugLoc &DL = MI.getDebugLoc(); 4010 4011 bool IsAdd = (MI.getOpcode() == AMDGPU::V_ADD_U64_PSEUDO); 4012 4013 MachineOperand &Dest = MI.getOperand(0); 4014 MachineOperand &Src0 = MI.getOperand(1); 4015 MachineOperand &Src1 = MI.getOperand(2); 4016 4017 if (IsAdd && ST.hasLshlAddB64()) { 4018 auto Add = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_LSHL_ADD_U64_e64), 4019 Dest.getReg()) 4020 .add(Src0) 4021 .addImm(0) 4022 .add(Src1); 4023 TII->legalizeOperands(*Add); 4024 MI.eraseFromParent(); 4025 return BB; 4026 } 4027 4028 const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4029 4030 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4031 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4032 4033 Register CarryReg = MRI.createVirtualRegister(CarryRC); 4034 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); 4035 4036 const TargetRegisterClass *Src0RC = Src0.isReg() 4037 ? MRI.getRegClass(Src0.getReg()) 4038 : &AMDGPU::VReg_64RegClass; 4039 const TargetRegisterClass *Src1RC = Src1.isReg() 4040 ? MRI.getRegClass(Src1.getReg()) 4041 : &AMDGPU::VReg_64RegClass; 4042 4043 const TargetRegisterClass *Src0SubRC = 4044 TRI->getSubRegClass(Src0RC, AMDGPU::sub0); 4045 const TargetRegisterClass *Src1SubRC = 4046 TRI->getSubRegClass(Src1RC, AMDGPU::sub1); 4047 4048 MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm( 4049 MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); 4050 MachineOperand SrcReg1Sub0 = TII->buildExtractSubRegOrImm( 4051 MI, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC); 4052 4053 MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm( 4054 MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); 4055 MachineOperand SrcReg1Sub1 = TII->buildExtractSubRegOrImm( 4056 MI, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC); 4057 4058 unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; 4059 MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) 4060 .addReg(CarryReg, RegState::Define) 4061 .add(SrcReg0Sub0) 4062 .add(SrcReg1Sub0) 4063 .addImm(0); // clamp bit 4064 4065 unsigned HiOpc = IsAdd ? 
AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
4066 MachineInstr *HiHalf =
4067 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
4068 .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
4069 .add(SrcReg0Sub1)
4070 .add(SrcReg1Sub1)
4071 .addReg(CarryReg, RegState::Kill)
4072 .addImm(0); // clamp bit
4073 
4074 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
4075 .addReg(DestSub0)
4076 .addImm(AMDGPU::sub0)
4077 .addReg(DestSub1)
4078 .addImm(AMDGPU::sub1);
4079 TII->legalizeOperands(*LoHalf);
4080 TII->legalizeOperands(*HiHalf);
4081 MI.eraseFromParent();
4082 return BB;
4083 }
4084 case AMDGPU::S_ADD_CO_PSEUDO:
4085 case AMDGPU::S_SUB_CO_PSEUDO: {
4086 // This pseudo has a chance to be selected
4087 // only from a uniform add/subcarry node. All the VGPR operands
4088 // are therefore assumed to be splat vectors.
4089 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
4090 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
4091 const SIRegisterInfo *TRI = ST.getRegisterInfo();
4092 MachineBasicBlock::iterator MII = MI;
4093 const DebugLoc &DL = MI.getDebugLoc();
4094 MachineOperand &Dest = MI.getOperand(0);
4095 MachineOperand &CarryDest = MI.getOperand(1);
4096 MachineOperand &Src0 = MI.getOperand(2);
4097 MachineOperand &Src1 = MI.getOperand(3);
4098 MachineOperand &Src2 = MI.getOperand(4);
4099 unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
4100 ? AMDGPU::S_ADDC_U32
4101 : AMDGPU::S_SUBB_U32;
4102 if (Src0.isReg() && TRI->isVectorRegister(MRI, Src0.getReg())) {
4103 Register RegOp0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4104 BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp0)
4105 .addReg(Src0.getReg());
4106 Src0.setReg(RegOp0);
4107 }
4108 if (Src1.isReg() && TRI->isVectorRegister(MRI, Src1.getReg())) {
4109 Register RegOp1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4110 BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp1)
4111 .addReg(Src1.getReg());
4112 Src1.setReg(RegOp1);
4113 }
4114 Register RegOp2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4115 if (TRI->isVectorRegister(MRI, Src2.getReg())) {
4116 BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp2)
4117 .addReg(Src2.getReg());
4118 Src2.setReg(RegOp2);
4119 }
4120 
4121 const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
4122 unsigned WaveSize = TRI->getRegSizeInBits(*Src2RC);
4123 assert(WaveSize == 64 || WaveSize == 32);
4124 
4125 if (WaveSize == 64) {
4126 if (ST.hasScalarCompareEq64()) {
4127 BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
4128 .addReg(Src2.getReg())
4129 .addImm(0);
4130 } else {
4131 const TargetRegisterClass *SubRC =
4132 TRI->getSubRegClass(Src2RC, AMDGPU::sub0);
4133 MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm(
4134 MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC);
4135 MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm(
4136 MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC);
4137 Register Src2_32 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4138 
4139 BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32)
4140 .add(Src2Sub0)
4141 .add(Src2Sub1);
4142 
4143 BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
4144 .addReg(Src2_32, RegState::Kill)
4145 .addImm(0);
4146 }
4147 } else {
4148 BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMPK_LG_U32))
4149 .addReg(Src2.getReg())
4150 .addImm(0);
4151 }
4152 
4153 BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg()).add(Src0).add(Src1);
4154 
4155 unsigned SelOpc =
4156 (WaveSize == 64)
? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32; 4157 4158 BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg()) 4159 .addImm(-1) 4160 .addImm(0); 4161 4162 MI.eraseFromParent(); 4163 return BB; 4164 } 4165 case AMDGPU::SI_INIT_M0: { 4166 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), 4167 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 4168 .add(MI.getOperand(0)); 4169 MI.eraseFromParent(); 4170 return BB; 4171 } 4172 case AMDGPU::GET_GROUPSTATICSIZE: { 4173 assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA || 4174 getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL); 4175 DebugLoc DL = MI.getDebugLoc(); 4176 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) 4177 .add(MI.getOperand(0)) 4178 .addImm(MFI->getLDSSize()); 4179 MI.eraseFromParent(); 4180 return BB; 4181 } 4182 case AMDGPU::SI_INDIRECT_SRC_V1: 4183 case AMDGPU::SI_INDIRECT_SRC_V2: 4184 case AMDGPU::SI_INDIRECT_SRC_V4: 4185 case AMDGPU::SI_INDIRECT_SRC_V8: 4186 case AMDGPU::SI_INDIRECT_SRC_V16: 4187 case AMDGPU::SI_INDIRECT_SRC_V32: 4188 return emitIndirectSrc(MI, *BB, *getSubtarget()); 4189 case AMDGPU::SI_INDIRECT_DST_V1: 4190 case AMDGPU::SI_INDIRECT_DST_V2: 4191 case AMDGPU::SI_INDIRECT_DST_V4: 4192 case AMDGPU::SI_INDIRECT_DST_V8: 4193 case AMDGPU::SI_INDIRECT_DST_V16: 4194 case AMDGPU::SI_INDIRECT_DST_V32: 4195 return emitIndirectDst(MI, *BB, *getSubtarget()); 4196 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 4197 case AMDGPU::SI_KILL_I1_PSEUDO: 4198 return splitKillBlock(MI, BB); 4199 case AMDGPU::V_CNDMASK_B64_PSEUDO: { 4200 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 4201 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 4202 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4203 4204 Register Dst = MI.getOperand(0).getReg(); 4205 Register Src0 = MI.getOperand(1).getReg(); 4206 Register Src1 = MI.getOperand(2).getReg(); 4207 const DebugLoc &DL = MI.getDebugLoc(); 4208 Register SrcCond = MI.getOperand(3).getReg(); 4209 4210 Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4211 Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4212 const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4213 Register SrcCondCopy = MRI.createVirtualRegister(CondRC); 4214 4215 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) 4216 .addReg(SrcCond); 4217 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) 4218 .addImm(0) 4219 .addReg(Src0, 0, AMDGPU::sub0) 4220 .addImm(0) 4221 .addReg(Src1, 0, AMDGPU::sub0) 4222 .addReg(SrcCondCopy); 4223 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) 4224 .addImm(0) 4225 .addReg(Src0, 0, AMDGPU::sub1) 4226 .addImm(0) 4227 .addReg(Src1, 0, AMDGPU::sub1) 4228 .addReg(SrcCondCopy); 4229 4230 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) 4231 .addReg(DstLo) 4232 .addImm(AMDGPU::sub0) 4233 .addReg(DstHi) 4234 .addImm(AMDGPU::sub1); 4235 MI.eraseFromParent(); 4236 return BB; 4237 } 4238 case AMDGPU::SI_BR_UNDEF: { 4239 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 4240 const DebugLoc &DL = MI.getDebugLoc(); 4241 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) 4242 .add(MI.getOperand(0)); 4243 Br->getOperand(1).setIsUndef(true); // read undef SCC 4244 MI.eraseFromParent(); 4245 return BB; 4246 } 4247 case AMDGPU::ADJCALLSTACKUP: 4248 case AMDGPU::ADJCALLSTACKDOWN: { 4249 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 4250 MachineInstrBuilder MIB(*MF, &MI); 4251 
MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine) 4252 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit); 4253 return BB; 4254 } 4255 case AMDGPU::SI_CALL_ISEL: { 4256 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 4257 const DebugLoc &DL = MI.getDebugLoc(); 4258 4259 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF); 4260 4261 MachineInstrBuilder MIB; 4262 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg); 4263 4264 for (const MachineOperand &MO : MI.operands()) 4265 MIB.add(MO); 4266 4267 MIB.cloneMemRefs(MI); 4268 MI.eraseFromParent(); 4269 return BB; 4270 } 4271 case AMDGPU::V_ADD_CO_U32_e32: 4272 case AMDGPU::V_SUB_CO_U32_e32: 4273 case AMDGPU::V_SUBREV_CO_U32_e32: { 4274 // TODO: Define distinct V_*_I32_Pseudo instructions instead. 4275 const DebugLoc &DL = MI.getDebugLoc(); 4276 unsigned Opc = MI.getOpcode(); 4277 4278 bool NeedClampOperand = false; 4279 if (TII->pseudoToMCOpcode(Opc) == -1) { 4280 Opc = AMDGPU::getVOPe64(Opc); 4281 NeedClampOperand = true; 4282 } 4283 4284 auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg()); 4285 if (TII->isVOP3(*I)) { 4286 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 4287 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4288 I.addReg(TRI->getVCC(), RegState::Define); 4289 } 4290 I.add(MI.getOperand(1)) 4291 .add(MI.getOperand(2)); 4292 if (NeedClampOperand) 4293 I.addImm(0); // clamp bit for e64 encoding 4294 4295 TII->legalizeOperands(*I); 4296 4297 MI.eraseFromParent(); 4298 return BB; 4299 } 4300 case AMDGPU::V_ADDC_U32_e32: 4301 case AMDGPU::V_SUBB_U32_e32: 4302 case AMDGPU::V_SUBBREV_U32_e32: 4303 // These instructions have an implicit use of vcc which counts towards the 4304 // constant bus limit. 4305 TII->legalizeOperands(MI); 4306 return BB; 4307 case AMDGPU::DS_GWS_INIT: 4308 case AMDGPU::DS_GWS_SEMA_BR: 4309 case AMDGPU::DS_GWS_BARRIER: 4310 TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::data0); 4311 LLVM_FALLTHROUGH; 4312 case AMDGPU::DS_GWS_SEMA_V: 4313 case AMDGPU::DS_GWS_SEMA_P: 4314 case AMDGPU::DS_GWS_SEMA_RELEASE_ALL: 4315 // A s_waitcnt 0 is required to be the instruction immediately following. 4316 if (getSubtarget()->hasGWSAutoReplay()) { 4317 bundleInstWithWaitcnt(MI); 4318 return BB; 4319 } 4320 4321 return emitGWSMemViolTestLoop(MI, BB); 4322 case AMDGPU::S_SETREG_B32: { 4323 // Try to optimize cases that only set the denormal mode or rounding mode. 4324 // 4325 // If the s_setreg_b32 fully sets all of the bits in the rounding mode or 4326 // denormal mode to a constant, we can use s_round_mode or s_denorm_mode 4327 // instead. 4328 // 4329 // FIXME: This could be predicates on the immediate, but tablegen doesn't 4330 // allow you to have a no side effect instruction in the output of a 4331 // sideeffecting pattern. 4332 unsigned ID, Offset, Width; 4333 AMDGPU::Hwreg::decodeHwreg(MI.getOperand(1).getImm(), ID, Offset, Width); 4334 if (ID != AMDGPU::Hwreg::ID_MODE) 4335 return BB; 4336 4337 const unsigned WidthMask = maskTrailingOnes<unsigned>(Width); 4338 const unsigned SetMask = WidthMask << Offset; 4339 4340 if (getSubtarget()->hasDenormModeInst()) { 4341 unsigned SetDenormOp = 0; 4342 unsigned SetRoundOp = 0; 4343 4344 // The dedicated instructions can only set the whole denorm or round mode 4345 // at once, not a subset of bits in either. 
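// In the MODE register the FP round mode occupies the low 4 bits and the FP
// denorm mode the next 4 bits, which is why the immediate is split with
// '& 0xf' and '>> 4' below.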
4346 if (SetMask ==
4347 (AMDGPU::Hwreg::FP_ROUND_MASK | AMDGPU::Hwreg::FP_DENORM_MASK)) {
4348 // If this fully sets both the round and denorm mode, emit the two
4349 // dedicated instructions for these.
4350 SetRoundOp = AMDGPU::S_ROUND_MODE;
4351 SetDenormOp = AMDGPU::S_DENORM_MODE;
4352 } else if (SetMask == AMDGPU::Hwreg::FP_ROUND_MASK) {
4353 SetRoundOp = AMDGPU::S_ROUND_MODE;
4354 } else if (SetMask == AMDGPU::Hwreg::FP_DENORM_MASK) {
4355 SetDenormOp = AMDGPU::S_DENORM_MODE;
4356 }
4357 
4358 if (SetRoundOp || SetDenormOp) {
4359 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
4360 MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg());
4361 if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) {
4362 unsigned ImmVal = Def->getOperand(1).getImm();
4363 if (SetRoundOp) {
4364 BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetRoundOp))
4365 .addImm(ImmVal & 0xf);
4366 
4367 // If we also have the denorm mode, get just the denorm mode bits.
4368 ImmVal >>= 4;
4369 }
4370 
4371 if (SetDenormOp) {
4372 BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetDenormOp))
4373 .addImm(ImmVal & 0xf);
4374 }
4375 
4376 MI.eraseFromParent();
4377 return BB;
4378 }
4379 }
4380 }
4381 
4382 // If only FP bits are touched, use the no-side-effects pseudo.
4383 if ((SetMask & (AMDGPU::Hwreg::FP_ROUND_MASK |
4384 AMDGPU::Hwreg::FP_DENORM_MASK)) == SetMask)
4385 MI.setDesc(TII->get(AMDGPU::S_SETREG_B32_mode));
4386 
4387 return BB;
4388 }
4389 default:
4390 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
4391 }
4392 }
4393 
4394 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
4395 return isTypeLegal(VT.getScalarType());
4396 }
4397 
4398 bool SITargetLowering::hasAtomicFaddRtnForTy(SDValue &Op) const {
4399 switch (Op.getValue(0).getSimpleValueType().SimpleTy) {
4400 case MVT::f32:
4401 return Subtarget->hasAtomicFaddRtnInsts();
4402 case MVT::v2f16:
4403 case MVT::f64:
4404 return Subtarget->hasGFX90AInsts();
4405 default:
4406 return false;
4407 }
4408 }
4409 
4410 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
4411 // This currently forces unfolding various combinations of fsub into fma with
4412 // free fneg'd operands. As long as we have fast FMA (controlled by
4413 // isFMAFasterThanFMulAndFAdd), we should perform these.
4414 
4415 // When fma is quarter rate, for f64 where add / sub are at best half rate,
4416 // most of these combines appear to be cycle neutral but save on instruction
4417 // count / code size.
4418 return true;
4419 }
4420 
4421 bool SITargetLowering::enableAggressiveFMAFusion(LLT Ty) const { return true; }
4422 
4423 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
4424 EVT VT) const {
4425 if (!VT.isVector()) {
4426 return MVT::i1;
4427 }
4428 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
4429 }
4430 
4431 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
4432 // TODO: Should i16 be used always if legal? For now it would force VALU
4433 // shifts.
4434 return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
4435 }
4436 
4437 LLT SITargetLowering::getPreferredShiftAmountTy(LLT Ty) const {
4438 return (Ty.getScalarSizeInBits() <= 16 && Subtarget->has16BitInsts())
4439 ? Ty.changeElementSize(16)
4440 : Ty.changeElementSize(32);
4441 }
4442 
4443 // Answering this is somewhat tricky and depends on the specific device, since
4444 // different devices have different rates for fma or for all f64 operations.
4445 //
4446 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
4447 // regardless of which device (although the number of cycles differs between
4448 // devices), so it is always profitable for f64.
4449 //
4450 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
4451 // only on full rate devices. Normally, we should prefer selecting v_mad_f32,
4452 // which we can always do even without fused FP ops since it returns the same
4453 // result as the separate operations and is always full rate. Therefore, we lie
4454 // and report that it is not faster for f32. v_mad_f32, however, does not
4455 // support denormals, so we do report fma as faster if we have a fast fma
4456 // device and require denormals.
4457 //
4458 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
4459 EVT VT) const {
4460 VT = VT.getScalarType();
4461 
4462 switch (VT.getSimpleVT().SimpleTy) {
4463 case MVT::f32: {
4464 // If mad is not available this depends only on whether f32 fma is full rate.
4465 if (!Subtarget->hasMadMacF32Insts())
4466 return Subtarget->hasFastFMAF32();
4467 
4468 // Otherwise f32 mad is always full rate and returns the same result as
4469 // the separate operations so should be preferred over fma.
4470 // However, it does not support denormals.
4471 if (hasFP32Denormals(MF))
4472 return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
4473 
4474 // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
4475 return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
4476 }
4477 case MVT::f64:
4478 return true;
4479 case MVT::f16:
4480 return Subtarget->has16BitInsts() && hasFP64FP16Denormals(MF);
4481 default:
4482 break;
4483 }
4484 
4485 return false;
4486 }
4487 
4488 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
4489 LLT Ty) const {
4490 switch (Ty.getScalarSizeInBits()) {
4491 case 16:
4492 return isFMAFasterThanFMulAndFAdd(MF, MVT::f16);
4493 case 32:
4494 return isFMAFasterThanFMulAndFAdd(MF, MVT::f32);
4495 case 64:
4496 return isFMAFasterThanFMulAndFAdd(MF, MVT::f64);
4497 default:
4498 break;
4499 }
4500 
4501 return false;
4502 }
4503 
4504 bool SITargetLowering::isFMADLegal(const MachineInstr &MI, LLT Ty) const {
4505 if (!Ty.isScalar())
4506 return false;
4507 
4508 if (Ty.getScalarSizeInBits() == 16)
4509 return Subtarget->hasMadF16() && !hasFP64FP16Denormals(*MI.getMF());
4510 if (Ty.getScalarSizeInBits() == 32)
4511 return Subtarget->hasMadMacF32Insts() && !hasFP32Denormals(*MI.getMF());
4512 
4513 return false;
4514 }
4515 
4516 bool SITargetLowering::isFMADLegal(const SelectionDAG &DAG,
4517 const SDNode *N) const {
4518 // TODO: Check future ftz flag
4519 // v_mad_f32/v_mac_f32 do not support denormals.
4520 EVT VT = N->getValueType(0);
4521 if (VT == MVT::f32)
4522 return Subtarget->hasMadMacF32Insts() &&
4523 !hasFP32Denormals(DAG.getMachineFunction());
4524 if (VT == MVT::f16) {
4525 return Subtarget->hasMadF16() &&
4526 !hasFP64FP16Denormals(DAG.getMachineFunction());
4527 }
4528 
4529 return false;
4530 }
4531 
4532 //===----------------------------------------------------------------------===//
4533 // Custom DAG Lowering Operations
4534 //===----------------------------------------------------------------------===//
4535 
4536 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
4537 // wider vector type is legal.
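// For example, a v4f16 operation is lowered as two v2f16 operations whose
// results are concatenated back together, instead of being scalarized into
// four f16 operations.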
4538 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op, 4539 SelectionDAG &DAG) const { 4540 unsigned Opc = Op.getOpcode(); 4541 EVT VT = Op.getValueType(); 4542 assert(VT == MVT::v4f16 || VT == MVT::v4i16); 4543 4544 SDValue Lo, Hi; 4545 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0); 4546 4547 SDLoc SL(Op); 4548 SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo, 4549 Op->getFlags()); 4550 SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi, 4551 Op->getFlags()); 4552 4553 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); 4554 } 4555 4556 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the 4557 // wider vector type is legal. 4558 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op, 4559 SelectionDAG &DAG) const { 4560 unsigned Opc = Op.getOpcode(); 4561 EVT VT = Op.getValueType(); 4562 assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 || 4563 VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i16 || 4564 VT == MVT::v16f16 || VT == MVT::v8f32 || VT == MVT::v16f32 || 4565 VT == MVT::v32f32); 4566 4567 SDValue Lo0, Hi0; 4568 std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0); 4569 SDValue Lo1, Hi1; 4570 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1); 4571 4572 SDLoc SL(Op); 4573 4574 SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, 4575 Op->getFlags()); 4576 SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, 4577 Op->getFlags()); 4578 4579 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); 4580 } 4581 4582 SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op, 4583 SelectionDAG &DAG) const { 4584 unsigned Opc = Op.getOpcode(); 4585 EVT VT = Op.getValueType(); 4586 assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v8i16 || 4587 VT == MVT::v8f16 || VT == MVT::v4f32 || VT == MVT::v16i16 || 4588 VT == MVT::v16f16 || VT == MVT::v8f32 || VT == MVT::v16f32 || 4589 VT == MVT::v32f32); 4590 4591 SDValue Lo0, Hi0; 4592 SDValue Op0 = Op.getOperand(0); 4593 std::tie(Lo0, Hi0) = Op0.getValueType().isVector() 4594 ? 
DAG.SplitVectorOperand(Op.getNode(), 0) 4595 : std::make_pair(Op0, Op0); 4596 SDValue Lo1, Hi1; 4597 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1); 4598 SDValue Lo2, Hi2; 4599 std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2); 4600 4601 SDLoc SL(Op); 4602 auto ResVT = DAG.GetSplitDestVTs(VT); 4603 4604 SDValue OpLo = DAG.getNode(Opc, SL, ResVT.first, Lo0, Lo1, Lo2, 4605 Op->getFlags()); 4606 SDValue OpHi = DAG.getNode(Opc, SL, ResVT.second, Hi0, Hi1, Hi2, 4607 Op->getFlags()); 4608 4609 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); 4610 } 4611 4612 4613 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 4614 switch (Op.getOpcode()) { 4615 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); 4616 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 4617 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4618 case ISD::LOAD: { 4619 SDValue Result = LowerLOAD(Op, DAG); 4620 assert((!Result.getNode() || 4621 Result.getNode()->getNumValues() == 2) && 4622 "Load should return a value and a chain"); 4623 return Result; 4624 } 4625 4626 case ISD::FSIN: 4627 case ISD::FCOS: 4628 return LowerTrig(Op, DAG); 4629 case ISD::SELECT: return LowerSELECT(Op, DAG); 4630 case ISD::FDIV: return LowerFDIV(Op, DAG); 4631 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); 4632 case ISD::STORE: return LowerSTORE(Op, DAG); 4633 case ISD::GlobalAddress: { 4634 MachineFunction &MF = DAG.getMachineFunction(); 4635 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 4636 return LowerGlobalAddress(MFI, Op, DAG); 4637 } 4638 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 4639 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 4640 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); 4641 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); 4642 case ISD::INSERT_SUBVECTOR: 4643 return lowerINSERT_SUBVECTOR(Op, DAG); 4644 case ISD::INSERT_VECTOR_ELT: 4645 return lowerINSERT_VECTOR_ELT(Op, DAG); 4646 case ISD::EXTRACT_VECTOR_ELT: 4647 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 4648 case ISD::VECTOR_SHUFFLE: 4649 return lowerVECTOR_SHUFFLE(Op, DAG); 4650 case ISD::SCALAR_TO_VECTOR: 4651 return lowerSCALAR_TO_VECTOR(Op, DAG); 4652 case ISD::BUILD_VECTOR: 4653 return lowerBUILD_VECTOR(Op, DAG); 4654 case ISD::FP_ROUND: 4655 return lowerFP_ROUND(Op, DAG); 4656 case ISD::FPTRUNC_ROUND: { 4657 unsigned Opc; 4658 SDLoc DL(Op); 4659 4660 if (Op.getOperand(0)->getValueType(0) != MVT::f32) 4661 return SDValue(); 4662 4663 // Get the rounding mode from the last operand 4664 int RoundMode = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 4665 if (RoundMode == (int)RoundingMode::TowardPositive) 4666 Opc = AMDGPUISD::FPTRUNC_ROUND_UPWARD; 4667 else if (RoundMode == (int)RoundingMode::TowardNegative) 4668 Opc = AMDGPUISD::FPTRUNC_ROUND_DOWNWARD; 4669 else 4670 return SDValue(); 4671 4672 return DAG.getNode(Opc, DL, Op.getNode()->getVTList(), Op->getOperand(0)); 4673 } 4674 case ISD::TRAP: 4675 return lowerTRAP(Op, DAG); 4676 case ISD::DEBUGTRAP: 4677 return lowerDEBUGTRAP(Op, DAG); 4678 case ISD::FABS: 4679 case ISD::FNEG: 4680 case ISD::FCANONICALIZE: 4681 case ISD::BSWAP: 4682 return splitUnaryVectorOp(Op, DAG); 4683 case ISD::FMINNUM: 4684 case ISD::FMAXNUM: 4685 return lowerFMINNUM_FMAXNUM(Op, DAG); 4686 case ISD::FMA: 4687 return splitTernaryVectorOp(Op, DAG); 4688 case ISD::FP_TO_SINT: 4689 case ISD::FP_TO_UINT: 4690 return LowerFP_TO_INT(Op, DAG); 4691 
case ISD::SHL:
4692 case ISD::SRA:
4693 case ISD::SRL:
4694 case ISD::ADD:
4695 case ISD::SUB:
4696 case ISD::MUL:
4697 case ISD::SMIN:
4698 case ISD::SMAX:
4699 case ISD::UMIN:
4700 case ISD::UMAX:
4701 case ISD::FADD:
4702 case ISD::FMUL:
4703 case ISD::FMINNUM_IEEE:
4704 case ISD::FMAXNUM_IEEE:
4705 case ISD::UADDSAT:
4706 case ISD::USUBSAT:
4707 case ISD::SADDSAT:
4708 case ISD::SSUBSAT:
4709 return splitBinaryVectorOp(Op, DAG);
4710 case ISD::SMULO:
4711 case ISD::UMULO:
4712 return lowerXMULO(Op, DAG);
4713 case ISD::SMUL_LOHI:
4714 case ISD::UMUL_LOHI:
4715 return lowerXMUL_LOHI(Op, DAG);
4716 case ISD::DYNAMIC_STACKALLOC:
4717 return LowerDYNAMIC_STACKALLOC(Op, DAG);
4718 }
4719 return SDValue();
4720 }
4721 
4722 // Used for D16: Casts the result of an instruction into the right vector and
4723 // packs values if loads return unpacked values.
4724 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
4725 const SDLoc &DL,
4726 SelectionDAG &DAG, bool Unpacked) {
4727 if (!LoadVT.isVector())
4728 return Result;
4729 
4730 // Cast back to the original packed type or to a larger type that is a
4731 // multiple of 32 bits for D16. Widening the return type is required for
4732 // legalization.
4733 EVT FittingLoadVT = LoadVT;
4734 if ((LoadVT.getVectorNumElements() % 2) == 1) {
4735 FittingLoadVT =
4736 EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(),
4737 LoadVT.getVectorNumElements() + 1);
4738 }
4739 
4740 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
4741 // Truncate to v2i16/v4i16.
4742 EVT IntLoadVT = FittingLoadVT.changeTypeToInteger();
4743 
4744 // Work around the legalizer not scalarizing truncate after vector op
4745 // legalization but not creating intermediate vector trunc.
4746 SmallVector<SDValue, 4> Elts;
4747 DAG.ExtractVectorElements(Result, Elts);
4748 for (SDValue &Elt : Elts)
4749 Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
4750 
4751 // Pad illegal v1i16/v3f16 to v4i16
4752 if ((LoadVT.getVectorNumElements() % 2) == 1)
4753 Elts.push_back(DAG.getUNDEF(MVT::i16));
4754 
4755 Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
4756 
4757 // Bitcast to original type (v2f16/v4f16).
4758 return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result);
4759 }
4760 
4761 // Cast back to the original packed type.
4762 return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result);
4763 }
4764 
4765 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
4766 MemSDNode *M,
4767 SelectionDAG &DAG,
4768 ArrayRef<SDValue> Ops,
4769 bool IsIntrinsic) const {
4770 SDLoc DL(M);
4771 
4772 bool Unpacked = Subtarget->hasUnpackedD16VMem();
4773 EVT LoadVT = M->getValueType(0);
4774 
4775 EVT EquivLoadVT = LoadVT;
4776 if (LoadVT.isVector()) {
4777 if (Unpacked) {
4778 EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
4779 LoadVT.getVectorNumElements());
4780 } else if ((LoadVT.getVectorNumElements() % 2) == 1) {
4781 // Widen v3f16 to legal type
4782 EquivLoadVT =
4783 EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(),
4784 LoadVT.getVectorNumElements() + 1);
4785 }
4786 }
4787 
4788 // Change from v4f16/v2f16 to EquivLoadVT.
4789 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
4790 
4791 SDValue Load
4792 = DAG.getMemIntrinsicNode(
4793 IsIntrinsic ?
(unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL, 4794 VTList, Ops, M->getMemoryVT(), 4795 M->getMemOperand()); 4796 4797 SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked); 4798 4799 return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL); 4800 } 4801 4802 SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat, 4803 SelectionDAG &DAG, 4804 ArrayRef<SDValue> Ops) const { 4805 SDLoc DL(M); 4806 EVT LoadVT = M->getValueType(0); 4807 EVT EltType = LoadVT.getScalarType(); 4808 EVT IntVT = LoadVT.changeTypeToInteger(); 4809 4810 bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); 4811 4812 unsigned Opc = 4813 IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD; 4814 4815 if (IsD16) { 4816 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops); 4817 } 4818 4819 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics 4820 if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32) 4821 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); 4822 4823 if (isTypeLegal(LoadVT)) { 4824 return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT, 4825 M->getMemOperand(), DAG); 4826 } 4827 4828 EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT); 4829 SDVTList VTList = DAG.getVTList(CastVT, MVT::Other); 4830 SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT, 4831 M->getMemOperand(), DAG); 4832 return DAG.getMergeValues( 4833 {DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)}, 4834 DL); 4835 } 4836 4837 static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI, 4838 SDNode *N, SelectionDAG &DAG) { 4839 EVT VT = N->getValueType(0); 4840 const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); 4841 unsigned CondCode = CD->getZExtValue(); 4842 if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(CondCode))) 4843 return DAG.getUNDEF(VT); 4844 4845 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); 4846 4847 SDValue LHS = N->getOperand(1); 4848 SDValue RHS = N->getOperand(2); 4849 4850 SDLoc DL(N); 4851 4852 EVT CmpVT = LHS.getValueType(); 4853 if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) { 4854 unsigned PromoteOp = ICmpInst::isSigned(IcInput) ? 
4855 ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4856 LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS); 4857 RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS); 4858 } 4859 4860 ISD::CondCode CCOpcode = getICmpCondCode(IcInput); 4861 4862 unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); 4863 EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); 4864 4865 SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS, 4866 DAG.getCondCode(CCOpcode)); 4867 if (VT.bitsEq(CCVT)) 4868 return SetCC; 4869 return DAG.getZExtOrTrunc(SetCC, DL, VT); 4870 } 4871 4872 static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI, 4873 SDNode *N, SelectionDAG &DAG) { 4874 EVT VT = N->getValueType(0); 4875 const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); 4876 4877 unsigned CondCode = CD->getZExtValue(); 4878 if (!FCmpInst::isFPPredicate(static_cast<FCmpInst::Predicate>(CondCode))) 4879 return DAG.getUNDEF(VT); 4880 4881 SDValue Src0 = N->getOperand(1); 4882 SDValue Src1 = N->getOperand(2); 4883 EVT CmpVT = Src0.getValueType(); 4884 SDLoc SL(N); 4885 4886 if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) { 4887 Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); 4888 Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); 4889 } 4890 4891 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); 4892 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); 4893 unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); 4894 EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); 4895 SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0, 4896 Src1, DAG.getCondCode(CCOpcode)); 4897 if (VT.bitsEq(CCVT)) 4898 return SetCC; 4899 return DAG.getZExtOrTrunc(SetCC, SL, VT); 4900 } 4901 4902 static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N, 4903 SelectionDAG &DAG) { 4904 EVT VT = N->getValueType(0); 4905 SDValue Src = N->getOperand(1); 4906 SDLoc SL(N); 4907 4908 if (Src.getOpcode() == ISD::SETCC) { 4909 // (ballot (ISD::SETCC ...)) -> (AMDGPUISD::SETCC ...) 
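// AMDGPUISD::SETCC yields a wave-sized lane mask rather than a per-lane i1; that mask is exactly the value ballot returns, so the compare can be re-emitted directly in the ballot's result type.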
4910 return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src.getOperand(0), 4911 Src.getOperand(1), Src.getOperand(2)); 4912 } 4913 if (const ConstantSDNode *Arg = dyn_cast<ConstantSDNode>(Src)) { 4914 // (ballot 0) -> 0 4915 if (Arg->isZero()) 4916 return DAG.getConstant(0, SL, VT); 4917 4918 // (ballot 1) -> EXEC/EXEC_LO 4919 if (Arg->isOne()) { 4920 Register Exec; 4921 if (VT.getScalarSizeInBits() == 32) 4922 Exec = AMDGPU::EXEC_LO; 4923 else if (VT.getScalarSizeInBits() == 64) 4924 Exec = AMDGPU::EXEC; 4925 else 4926 return SDValue(); 4927 4928 return DAG.getCopyFromReg(DAG.getEntryNode(), SL, Exec, VT); 4929 } 4930 } 4931 4932 // (ballot (i1 $src)) -> (AMDGPUISD::SETCC (i32 (zext $src)) (i32 0) 4933 // ISD::SETNE) 4934 return DAG.getNode( 4935 AMDGPUISD::SETCC, SL, VT, DAG.getZExtOrTrunc(Src, SL, MVT::i32), 4936 DAG.getConstant(0, SL, MVT::i32), DAG.getCondCode(ISD::SETNE)); 4937 } 4938 4939 void SITargetLowering::ReplaceNodeResults(SDNode *N, 4940 SmallVectorImpl<SDValue> &Results, 4941 SelectionDAG &DAG) const { 4942 switch (N->getOpcode()) { 4943 case ISD::INSERT_VECTOR_ELT: { 4944 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) 4945 Results.push_back(Res); 4946 return; 4947 } 4948 case ISD::EXTRACT_VECTOR_ELT: { 4949 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) 4950 Results.push_back(Res); 4951 return; 4952 } 4953 case ISD::INTRINSIC_WO_CHAIN: { 4954 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 4955 switch (IID) { 4956 case Intrinsic::amdgcn_cvt_pkrtz: { 4957 SDValue Src0 = N->getOperand(1); 4958 SDValue Src1 = N->getOperand(2); 4959 SDLoc SL(N); 4960 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, 4961 Src0, Src1); 4962 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); 4963 return; 4964 } 4965 case Intrinsic::amdgcn_cvt_pknorm_i16: 4966 case Intrinsic::amdgcn_cvt_pknorm_u16: 4967 case Intrinsic::amdgcn_cvt_pk_i16: 4968 case Intrinsic::amdgcn_cvt_pk_u16: { 4969 SDValue Src0 = N->getOperand(1); 4970 SDValue Src1 = N->getOperand(2); 4971 SDLoc SL(N); 4972 unsigned Opcode; 4973 4974 if (IID == Intrinsic::amdgcn_cvt_pknorm_i16) 4975 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; 4976 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16) 4977 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; 4978 else if (IID == Intrinsic::amdgcn_cvt_pk_i16) 4979 Opcode = AMDGPUISD::CVT_PK_I16_I32; 4980 else 4981 Opcode = AMDGPUISD::CVT_PK_U16_U32; 4982 4983 EVT VT = N->getValueType(0); 4984 if (isTypeLegal(VT)) 4985 Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1)); 4986 else { 4987 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1); 4988 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt)); 4989 } 4990 return; 4991 } 4992 } 4993 break; 4994 } 4995 case ISD::INTRINSIC_W_CHAIN: { 4996 if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) { 4997 if (Res.getOpcode() == ISD::MERGE_VALUES) { 4998 // FIXME: Hacky 4999 for (unsigned I = 0; I < Res.getNumOperands(); I++) { 5000 Results.push_back(Res.getOperand(I)); 5001 } 5002 } else { 5003 Results.push_back(Res); 5004 Results.push_back(Res.getValue(1)); 5005 } 5006 return; 5007 } 5008 5009 break; 5010 } 5011 case ISD::SELECT: { 5012 SDLoc SL(N); 5013 EVT VT = N->getValueType(0); 5014 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 5015 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); 5016 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); 5017 5018 EVT SelectVT = NewVT; 5019 if (NewVT.bitsLT(MVT::i32)) { 
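// Sub-dword types are not legal for the select here, so widen the operands to i32 and truncate the result back to NewVT afterwards.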
5020 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); 5021 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); 5022 SelectVT = MVT::i32; 5023 } 5024 5025 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, 5026 N->getOperand(0), LHS, RHS); 5027 5028 if (NewVT != SelectVT) 5029 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); 5030 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); 5031 return; 5032 } 5033 case ISD::FNEG: { 5034 if (N->getValueType(0) != MVT::v2f16) 5035 break; 5036 5037 SDLoc SL(N); 5038 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); 5039 5040 SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32, 5041 BC, 5042 DAG.getConstant(0x80008000, SL, MVT::i32)); 5043 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); 5044 return; 5045 } 5046 case ISD::FABS: { 5047 if (N->getValueType(0) != MVT::v2f16) 5048 break; 5049 5050 SDLoc SL(N); 5051 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); 5052 5053 SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32, 5054 BC, 5055 DAG.getConstant(0x7fff7fff, SL, MVT::i32)); 5056 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); 5057 return; 5058 } 5059 default: 5060 break; 5061 } 5062 } 5063 5064 /// Helper function for LowerBRCOND 5065 static SDNode *findUser(SDValue Value, unsigned Opcode) { 5066 5067 SDNode *Parent = Value.getNode(); 5068 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 5069 I != E; ++I) { 5070 5071 if (I.getUse().get() != Value) 5072 continue; 5073 5074 if (I->getOpcode() == Opcode) 5075 return *I; 5076 } 5077 return nullptr; 5078 } 5079 5080 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 5081 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 5082 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 5083 case Intrinsic::amdgcn_if: 5084 return AMDGPUISD::IF; 5085 case Intrinsic::amdgcn_else: 5086 return AMDGPUISD::ELSE; 5087 case Intrinsic::amdgcn_loop: 5088 return AMDGPUISD::LOOP; 5089 case Intrinsic::amdgcn_end_cf: 5090 llvm_unreachable("should not occur"); 5091 default: 5092 return 0; 5093 } 5094 } 5095 5096 // break, if_break, else_break are all only used as inputs to loop, not 5097 // directly as branch conditions. 5098 return 0; 5099 } 5100 5101 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { 5102 const Triple &TT = getTargetMachine().getTargetTriple(); 5103 return (GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || 5104 GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && 5105 AMDGPU::shouldEmitConstantsToTextSection(TT); 5106 } 5107 5108 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { 5109 // FIXME: Either avoid relying on address space here or change the default 5110 // address space for functions to avoid the explicit check. 
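// A GOT relocation is used for function pointers and for variables in globally addressable address spaces when the symbol is not known to be DSO-local and cannot be emitted as a text-section fixup.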
return (GV->getValueType()->isFunctionTy() || 5112 !isNonGlobalAddrSpace(GV->getAddressSpace())) && 5113 !shouldEmitFixup(GV) && 5114 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 5115 } 5116 5117 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { 5118 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); 5119 } 5120 5121 bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const { 5122 if (!GV->hasExternalLinkage()) 5123 return true; 5124 5125 const auto OS = getTargetMachine().getTargetTriple().getOS(); 5126 return OS == Triple::AMDHSA || OS == Triple::AMDPAL; 5127 } 5128 5129 /// This transforms the control flow intrinsics to get the branch destination as 5130 /// the last parameter; it also switches the branch target with BR if the need arises. 5131 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, 5132 SelectionDAG &DAG) const { 5133 SDLoc DL(BRCOND); 5134 5135 SDNode *Intr = BRCOND.getOperand(1).getNode(); 5136 SDValue Target = BRCOND.getOperand(2); 5137 SDNode *BR = nullptr; 5138 SDNode *SetCC = nullptr; 5139 5140 if (Intr->getOpcode() == ISD::SETCC) { 5141 // As long as we negate the condition, everything is fine. 5142 SetCC = Intr; 5143 Intr = SetCC->getOperand(0).getNode(); 5144 5145 } else { 5146 // Get the target from BR if we don't negate the condition. 5147 BR = findUser(BRCOND, ISD::BR); 5148 assert(BR && "brcond missing unconditional branch user"); 5149 Target = BR->getOperand(1); 5150 } 5151 5152 unsigned CFNode = isCFIntrinsic(Intr); 5153 if (CFNode == 0) { 5154 // This is a uniform branch so we don't need to legalize. 5155 return BRCOND; 5156 } 5157 5158 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || 5159 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; 5160 5161 assert(!SetCC || 5162 (SetCC->getConstantOperandVal(1) == 1 && 5163 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == 5164 ISD::SETNE)); 5165 5166 // operands of the new intrinsic call 5167 SmallVector<SDValue, 4> Ops; 5168 if (HaveChain) 5169 Ops.push_back(BRCOND.getOperand(0)); 5170 5171 Ops.append(Intr->op_begin() + (HaveChain ?
2 : 1), Intr->op_end()); 5172 Ops.push_back(Target); 5173 5174 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); 5175 5176 // build the new intrinsic call 5177 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode(); 5178 5179 if (!HaveChain) { 5180 SDValue Ops[] = { 5181 SDValue(Result, 0), 5182 BRCOND.getOperand(0) 5183 }; 5184 5185 Result = DAG.getMergeValues(Ops, DL).getNode(); 5186 } 5187 5188 if (BR) { 5189 // Give the branch instruction our target 5190 SDValue Ops[] = { 5191 BR->getOperand(0), 5192 BRCOND.getOperand(2) 5193 }; 5194 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); 5195 DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); 5196 } 5197 5198 SDValue Chain = SDValue(Result, Result->getNumValues() - 1); 5199 5200 // Copy the intrinsic results to registers 5201 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { 5202 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); 5203 if (!CopyToReg) 5204 continue; 5205 5206 Chain = DAG.getCopyToReg( 5207 Chain, DL, 5208 CopyToReg->getOperand(1), 5209 SDValue(Result, i - 1), 5210 SDValue()); 5211 5212 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); 5213 } 5214 5215 // Remove the old intrinsic from the chain 5216 DAG.ReplaceAllUsesOfValueWith( 5217 SDValue(Intr, Intr->getNumValues() - 1), 5218 Intr->getOperand(0)); 5219 5220 return Chain; 5221 } 5222 5223 SDValue SITargetLowering::LowerRETURNADDR(SDValue Op, 5224 SelectionDAG &DAG) const { 5225 MVT VT = Op.getSimpleValueType(); 5226 SDLoc DL(Op); 5227 // Checking the depth 5228 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) 5229 return DAG.getConstant(0, DL, VT); 5230 5231 MachineFunction &MF = DAG.getMachineFunction(); 5232 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 5233 // Check for kernel and shader functions 5234 if (Info->isEntryFunction()) 5235 return DAG.getConstant(0, DL, VT); 5236 5237 MachineFrameInfo &MFI = MF.getFrameInfo(); 5238 // There is a call to @llvm.returnaddress in this function 5239 MFI.setReturnAddressIsTaken(true); 5240 5241 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 5242 // Get the return address reg and mark it as an implicit live-in 5243 Register Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT, Op.getNode()->isDivergent())); 5244 5245 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT); 5246 } 5247 5248 SDValue SITargetLowering::getFPExtOrFPRound(SelectionDAG &DAG, 5249 SDValue Op, 5250 const SDLoc &DL, 5251 EVT VT) const { 5252 return Op.getValueType().bitsLE(VT) ? 
5253 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : 5254 DAG.getNode(ISD::FP_ROUND, DL, VT, Op, 5255 DAG.getTargetConstant(0, DL, MVT::i32)); 5256 } 5257 5258 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 5259 assert(Op.getValueType() == MVT::f16 && 5260 "Do not know how to custom lower FP_ROUND for non-f16 type"); 5261 5262 SDValue Src = Op.getOperand(0); 5263 EVT SrcVT = Src.getValueType(); 5264 if (SrcVT != MVT::f64) 5265 return Op; 5266 5267 SDLoc DL(Op); 5268 5269 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); 5270 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); 5271 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc); 5272 } 5273 5274 SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op, 5275 SelectionDAG &DAG) const { 5276 EVT VT = Op.getValueType(); 5277 const MachineFunction &MF = DAG.getMachineFunction(); 5278 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 5279 bool IsIEEEMode = Info->getMode().IEEE; 5280 5281 // FIXME: Assert during selection that this is only selected for 5282 // ieee_mode. Currently a combine can produce the ieee version for non-ieee 5283 // mode functions, but this happens to be OK since it's only done in cases 5284 // where there is known no sNaN. 5285 if (IsIEEEMode) 5286 return expandFMINNUM_FMAXNUM(Op.getNode(), DAG); 5287 5288 if (VT == MVT::v4f16 || VT == MVT::v8f16 || VT == MVT::v16f16) 5289 return splitBinaryVectorOp(Op, DAG); 5290 return Op; 5291 } 5292 5293 SDValue SITargetLowering::lowerXMULO(SDValue Op, SelectionDAG &DAG) const { 5294 EVT VT = Op.getValueType(); 5295 SDLoc SL(Op); 5296 SDValue LHS = Op.getOperand(0); 5297 SDValue RHS = Op.getOperand(1); 5298 bool isSigned = Op.getOpcode() == ISD::SMULO; 5299 5300 if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) { 5301 const APInt &C = RHSC->getAPIntValue(); 5302 // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X } 5303 if (C.isPowerOf2()) { 5304 // smulo(x, signed_min) is same as umulo(x, signed_min). 5305 bool UseArithShift = isSigned && !C.isMinSignedValue(); 5306 SDValue ShiftAmt = DAG.getConstant(C.logBase2(), SL, MVT::i32); 5307 SDValue Result = DAG.getNode(ISD::SHL, SL, VT, LHS, ShiftAmt); 5308 SDValue Overflow = DAG.getSetCC(SL, MVT::i1, 5309 DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL, 5310 SL, VT, Result, ShiftAmt), 5311 LHS, ISD::SETNE); 5312 return DAG.getMergeValues({ Result, Overflow }, SL); 5313 } 5314 } 5315 5316 SDValue Result = DAG.getNode(ISD::MUL, SL, VT, LHS, RHS); 5317 SDValue Top = DAG.getNode(isSigned ? ISD::MULHS : ISD::MULHU, 5318 SL, VT, LHS, RHS); 5319 5320 SDValue Sign = isSigned 5321 ? DAG.getNode(ISD::SRA, SL, VT, Result, 5322 DAG.getConstant(VT.getScalarSizeInBits() - 1, SL, MVT::i32)) 5323 : DAG.getConstant(0, SL, VT); 5324 SDValue Overflow = DAG.getSetCC(SL, MVT::i1, Top, Sign, ISD::SETNE); 5325 5326 return DAG.getMergeValues({ Result, Overflow }, SL); 5327 } 5328 5329 SDValue SITargetLowering::lowerXMUL_LOHI(SDValue Op, SelectionDAG &DAG) const { 5330 if (Op->isDivergent()) { 5331 // Select to V_MAD_[IU]64_[IU]32. 5332 return Op; 5333 } 5334 if (Subtarget->hasSMulHi()) { 5335 // Expand to S_MUL_I32 + S_MUL_HI_[IU]32. 5336 return SDValue(); 5337 } 5338 // The multiply is uniform but we would have to use V_MUL_HI_[IU]32 to 5339 // calculate the high part, so we might as well do the whole thing with 5340 // V_MAD_[IU]64_[IU]32. 
5341 return Op; 5342 } 5343 5344 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { 5345 if (!Subtarget->isTrapHandlerEnabled() || 5346 Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA) 5347 return lowerTrapEndpgm(Op, DAG); 5348 5349 if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) { 5350 switch (*HsaAbiVer) { 5351 case ELF::ELFABIVERSION_AMDGPU_HSA_V2: 5352 case ELF::ELFABIVERSION_AMDGPU_HSA_V3: 5353 return lowerTrapHsaQueuePtr(Op, DAG); 5354 case ELF::ELFABIVERSION_AMDGPU_HSA_V4: 5355 case ELF::ELFABIVERSION_AMDGPU_HSA_V5: 5356 return Subtarget->supportsGetDoorbellID() ? 5357 lowerTrapHsa(Op, DAG) : lowerTrapHsaQueuePtr(Op, DAG); 5358 } 5359 } 5360 5361 llvm_unreachable("Unknown trap handler"); 5362 } 5363 5364 SDValue SITargetLowering::lowerTrapEndpgm( 5365 SDValue Op, SelectionDAG &DAG) const { 5366 SDLoc SL(Op); 5367 SDValue Chain = Op.getOperand(0); 5368 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain); 5369 } 5370 5371 SDValue SITargetLowering::loadImplicitKernelArgument(SelectionDAG &DAG, MVT VT, 5372 const SDLoc &DL, Align Alignment, ImplicitParameter Param) const { 5373 MachineFunction &MF = DAG.getMachineFunction(); 5374 uint64_t Offset = getImplicitParameterOffset(MF, Param); 5375 SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, DAG.getEntryNode(), Offset); 5376 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); 5377 return DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, PtrInfo, Alignment, 5378 MachineMemOperand::MODereferenceable | 5379 MachineMemOperand::MOInvariant); 5380 } 5381 5382 SDValue SITargetLowering::lowerTrapHsaQueuePtr( 5383 SDValue Op, SelectionDAG &DAG) const { 5384 SDLoc SL(Op); 5385 SDValue Chain = Op.getOperand(0); 5386 5387 SDValue QueuePtr; 5388 // For code object version 5, QueuePtr is passed through implicit kernarg. 5389 if (AMDGPU::getAmdhsaCodeObjectVersion() == 5) { 5390 QueuePtr = 5391 loadImplicitKernelArgument(DAG, MVT::i64, SL, Align(8), QUEUE_PTR); 5392 } else { 5393 MachineFunction &MF = DAG.getMachineFunction(); 5394 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 5395 Register UserSGPR = Info->getQueuePtrUserSGPR(); 5396 5397 if (UserSGPR == AMDGPU::NoRegister) { 5398 // We probably are in a function incorrectly marked with 5399 // amdgpu-no-queue-ptr. This is undefined. We don't want to delete the 5400 // trap, so just use a null pointer. 
5401 QueuePtr = DAG.getConstant(0, SL, MVT::i64); 5402 } else { 5403 QueuePtr = CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, UserSGPR, 5404 MVT::i64); 5405 } 5406 } 5407 5408 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64); 5409 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01, 5410 QueuePtr, SDValue()); 5411 5412 uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap); 5413 SDValue Ops[] = { 5414 ToReg, 5415 DAG.getTargetConstant(TrapID, SL, MVT::i16), 5416 SGPR01, 5417 ToReg.getValue(1) 5418 }; 5419 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 5420 } 5421 5422 SDValue SITargetLowering::lowerTrapHsa( 5423 SDValue Op, SelectionDAG &DAG) const { 5424 SDLoc SL(Op); 5425 SDValue Chain = Op.getOperand(0); 5426 5427 uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap); 5428 SDValue Ops[] = { 5429 Chain, 5430 DAG.getTargetConstant(TrapID, SL, MVT::i16) 5431 }; 5432 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 5433 } 5434 5435 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const { 5436 SDLoc SL(Op); 5437 SDValue Chain = Op.getOperand(0); 5438 MachineFunction &MF = DAG.getMachineFunction(); 5439 5440 if (!Subtarget->isTrapHandlerEnabled() || 5441 Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA) { 5442 DiagnosticInfoUnsupported NoTrap(MF.getFunction(), 5443 "debugtrap handler not supported", 5444 Op.getDebugLoc(), 5445 DS_Warning); 5446 LLVMContext &Ctx = MF.getFunction().getContext(); 5447 Ctx.diagnose(NoTrap); 5448 return Chain; 5449 } 5450 5451 uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSADebugTrap); 5452 SDValue Ops[] = { 5453 Chain, 5454 DAG.getTargetConstant(TrapID, SL, MVT::i16) 5455 }; 5456 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 5457 } 5458 5459 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL, 5460 SelectionDAG &DAG) const { 5461 // FIXME: Use inline constants (src_{shared, private}_base) instead. 5462 if (Subtarget->hasApertureRegs()) { 5463 unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ? 5464 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE : 5465 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE; 5466 unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ? 5467 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE : 5468 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE; 5469 unsigned Encoding = 5470 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ | 5471 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ | 5472 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_; 5473 5474 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16); 5475 SDValue ApertureReg = SDValue( 5476 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0); 5477 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32); 5478 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount); 5479 } 5480 5481 // For code object version 5, private_base and shared_base are passed through 5482 // implicit kernargs. 5483 if (AMDGPU::getAmdhsaCodeObjectVersion() == 5) { 5484 ImplicitParameter Param = 5485 (AS == AMDGPUAS::LOCAL_ADDRESS) ? 
SHARED_BASE : PRIVATE_BASE; 5486 return loadImplicitKernelArgument(DAG, MVT::i32, DL, Align(4), Param); 5487 } 5488 5489 MachineFunction &MF = DAG.getMachineFunction(); 5490 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 5491 Register UserSGPR = Info->getQueuePtrUserSGPR(); 5492 if (UserSGPR == AMDGPU::NoRegister) { 5493 // We probably are in a function incorrectly marked with 5494 // amdgpu-no-queue-ptr. This is undefined. 5495 return DAG.getUNDEF(MVT::i32); 5496 } 5497 5498 SDValue QueuePtr = CreateLiveInRegister( 5499 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 5500 5501 // Offset into amd_queue_t for group_segment_aperture_base_hi / 5502 // private_segment_aperture_base_hi. 5503 uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44; 5504 5505 SDValue Ptr = 5506 DAG.getObjectPtrOffset(DL, QueuePtr, TypeSize::Fixed(StructOffset)); 5507 5508 // TODO: Use custom target PseudoSourceValue. 5509 // TODO: We should use the value from the IR intrinsic call, but it might not 5510 // be available and how do we get it? 5511 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); 5512 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo, 5513 commonAlignment(Align(64), StructOffset), 5514 MachineMemOperand::MODereferenceable | 5515 MachineMemOperand::MOInvariant); 5516 } 5517 5518 /// Return true if the value is a known valid address, such that a null check is 5519 /// not necessary. 5520 static bool isKnownNonNull(SDValue Val, SelectionDAG &DAG, 5521 const AMDGPUTargetMachine &TM, unsigned AddrSpace) { 5522 if (isa<FrameIndexSDNode>(Val) || isa<GlobalAddressSDNode>(Val) || 5523 isa<BasicBlockSDNode>(Val)) 5524 return true; 5525 5526 if (auto *ConstVal = dyn_cast<ConstantSDNode>(Val)) 5527 return ConstVal->getSExtValue() != TM.getNullPointerValue(AddrSpace); 5528 5529 // TODO: Search through arithmetic, handle arguments and loads 5530 // marked nonnull. 
5531 return false; 5532 } 5533 5534 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, 5535 SelectionDAG &DAG) const { 5536 SDLoc SL(Op); 5537 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); 5538 5539 SDValue Src = ASC->getOperand(0); 5540 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); 5541 unsigned SrcAS = ASC->getSrcAddressSpace(); 5542 5543 const AMDGPUTargetMachine &TM = 5544 static_cast<const AMDGPUTargetMachine &>(getTargetMachine()); 5545 5546 // flat -> local/private 5547 if (SrcAS == AMDGPUAS::FLAT_ADDRESS) { 5548 unsigned DestAS = ASC->getDestAddressSpace(); 5549 5550 if (DestAS == AMDGPUAS::LOCAL_ADDRESS || 5551 DestAS == AMDGPUAS::PRIVATE_ADDRESS) { 5552 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 5553 5554 if (isKnownNonNull(Src, DAG, TM, SrcAS)) 5555 return Ptr; 5556 5557 unsigned NullVal = TM.getNullPointerValue(DestAS); 5558 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 5559 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); 5560 5561 return DAG.getNode(ISD::SELECT, SL, MVT::i32, NonNull, Ptr, 5562 SegmentNullPtr); 5563 } 5564 } 5565 5566 // local/private -> flat 5567 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { 5568 if (SrcAS == AMDGPUAS::LOCAL_ADDRESS || 5569 SrcAS == AMDGPUAS::PRIVATE_ADDRESS) { 5570 5571 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG); 5572 SDValue CvtPtr = 5573 DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); 5574 CvtPtr = DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr); 5575 5576 if (isKnownNonNull(Src, DAG, TM, SrcAS)) 5577 return CvtPtr; 5578 5579 unsigned NullVal = TM.getNullPointerValue(SrcAS); 5580 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 5581 5582 SDValue NonNull 5583 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); 5584 5585 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, CvtPtr, 5586 FlatNullPtr); 5587 } 5588 } 5589 5590 if (SrcAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT && 5591 Op.getValueType() == MVT::i64) { 5592 const SIMachineFunctionInfo *Info = 5593 DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>(); 5594 SDValue Hi = DAG.getConstant(Info->get32BitAddressHighBits(), SL, MVT::i32); 5595 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Hi); 5596 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 5597 } 5598 5599 if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT && 5600 Src.getValueType() == MVT::i64) 5601 return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 5602 5603 // global <-> flat are no-ops and never emitted. 5604 5605 const MachineFunction &MF = DAG.getMachineFunction(); 5606 DiagnosticInfoUnsupported InvalidAddrSpaceCast( 5607 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); 5608 DAG.getContext()->diagnose(InvalidAddrSpaceCast); 5609 5610 return DAG.getUNDEF(ASC->getValueType(0)); 5611 } 5612 5613 // This lowers an INSERT_SUBVECTOR by extracting the individual elements from 5614 // the small vector and inserting them into the big vector. That is better than 5615 // the default expansion of doing it via a stack slot. Even though the use of 5616 // the stack slot would be optimized away afterwards, the stack slot itself 5617 // remains. 
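// INSERT_SUBVECTOR is only formed with a constant start index, so the element-by-element INSERT_VECTOR_ELT expansion below never has to handle a dynamic index.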
5618 SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, 5619 SelectionDAG &DAG) const { 5620 SDValue Vec = Op.getOperand(0); 5621 SDValue Ins = Op.getOperand(1); 5622 SDValue Idx = Op.getOperand(2); 5623 EVT VecVT = Vec.getValueType(); 5624 EVT InsVT = Ins.getValueType(); 5625 EVT EltVT = VecVT.getVectorElementType(); 5626 unsigned InsNumElts = InsVT.getVectorNumElements(); 5627 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 5628 SDLoc SL(Op); 5629 5630 for (unsigned I = 0; I != InsNumElts; ++I) { 5631 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins, 5632 DAG.getConstant(I, SL, MVT::i32)); 5633 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt, 5634 DAG.getConstant(IdxVal + I, SL, MVT::i32)); 5635 } 5636 return Vec; 5637 } 5638 5639 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 5640 SelectionDAG &DAG) const { 5641 SDValue Vec = Op.getOperand(0); 5642 SDValue InsVal = Op.getOperand(1); 5643 SDValue Idx = Op.getOperand(2); 5644 EVT VecVT = Vec.getValueType(); 5645 EVT EltVT = VecVT.getVectorElementType(); 5646 unsigned VecSize = VecVT.getSizeInBits(); 5647 unsigned EltSize = EltVT.getSizeInBits(); 5648 SDLoc SL(Op); 5649 5650 // Specially handle the case of v4i16 with static indexing. 5651 unsigned NumElts = VecVT.getVectorNumElements(); 5652 auto KIdx = dyn_cast<ConstantSDNode>(Idx); 5653 if (NumElts == 4 && EltSize == 16 && KIdx) { 5654 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec); 5655 5656 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, 5657 DAG.getConstant(0, SL, MVT::i32)); 5658 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, 5659 DAG.getConstant(1, SL, MVT::i32)); 5660 5661 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf); 5662 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf); 5663 5664 unsigned Idx = KIdx->getZExtValue(); 5665 bool InsertLo = Idx < 2; 5666 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16, 5667 InsertLo ? LoVec : HiVec, 5668 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal), 5669 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32)); 5670 5671 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf); 5672 5673 SDValue Concat = InsertLo ? 5674 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) : 5675 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf }); 5676 5677 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat); 5678 } 5679 5680 // Static indexing does not lower to stack access, and hence there is no need 5681 // for special custom lowering to avoid stack access. 5682 if (isa<ConstantSDNode>(Idx)) 5683 return SDValue(); 5684 5685 // Avoid stack access for dynamic indexing by custom lowering to 5686 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec 5687 5688 assert(VecSize <= 64 && "Expected target vector size to be <= 64 bits"); 5689 5690 MVT IntVT = MVT::getIntegerVT(VecSize); 5691 5692 // Convert vector index to bit-index and get the required bit mask. 5693 assert(isPowerOf2_32(EltSize)); 5694 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); 5695 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); 5696 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT, 5697 DAG.getConstant(0xffff, SL, IntVT), 5698 ScaledIdx); 5699 5700 // 1. Create a congruent vector with the target value in each element. 5701 SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT, 5702 DAG.getSplatBuildVector(VecVT, SL, InsVal)); 5703 5704 // 2. 
Mask off all other indices except the required index within (1). 5705 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal); 5706 5707 // 3. Mask off the required index within the target vector. 5708 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); 5709 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT, 5710 DAG.getNOT(SL, BFM, IntVT), BCVec); 5711 5712 // 4. Get (2) and (3) ORed into the target vector. 5713 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS); 5714 5715 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI); 5716 } 5717 5718 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 5719 SelectionDAG &DAG) const { 5720 SDLoc SL(Op); 5721 5722 EVT ResultVT = Op.getValueType(); 5723 SDValue Vec = Op.getOperand(0); 5724 SDValue Idx = Op.getOperand(1); 5725 EVT VecVT = Vec.getValueType(); 5726 unsigned VecSize = VecVT.getSizeInBits(); 5727 EVT EltVT = VecVT.getVectorElementType(); 5728 5729 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr); 5730 5731 // Make sure we do any optimizations that will make it easier to fold 5732 // source modifiers before obscuring it with bit operations. 5733 5734 // XXX - Why doesn't this get called when vector_shuffle is expanded? 5735 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI)) 5736 return Combined; 5737 5738 if (VecSize == 128 || VecSize == 256) { 5739 SDValue Lo, Hi; 5740 EVT LoVT, HiVT; 5741 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT); 5742 5743 if (VecSize == 128) { 5744 SDValue V2 = DAG.getBitcast(MVT::v2i64, Vec); 5745 Lo = DAG.getBitcast(LoVT, 5746 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64, V2, 5747 DAG.getConstant(0, SL, MVT::i32))); 5748 Hi = DAG.getBitcast(HiVT, 5749 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64, V2, 5750 DAG.getConstant(1, SL, MVT::i32))); 5751 } else { 5752 assert(VecSize == 256); 5753 5754 SDValue V2 = DAG.getBitcast(MVT::v4i64, Vec); 5755 SDValue Parts[4]; 5756 for (unsigned P = 0; P < 4; ++P) { 5757 Parts[P] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64, V2, 5758 DAG.getConstant(P, SL, MVT::i32)); 5759 } 5760 5761 Lo = DAG.getBitcast(LoVT, DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i64, 5762 Parts[0], Parts[1])); 5763 Hi = DAG.getBitcast(HiVT, DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i64, 5764 Parts[2], Parts[3])); 5765 } 5766 5767 EVT IdxVT = Idx.getValueType(); 5768 unsigned NElem = VecVT.getVectorNumElements(); 5769 assert(isPowerOf2_32(NElem)); 5770 SDValue IdxMask = DAG.getConstant(NElem / 2 - 1, SL, IdxVT); 5771 SDValue NewIdx = DAG.getNode(ISD::AND, SL, IdxVT, Idx, IdxMask); 5772 SDValue Half = DAG.getSelectCC(SL, Idx, IdxMask, Hi, Lo, ISD::SETUGT); 5773 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Half, NewIdx); 5774 } 5775 5776 assert(VecSize <= 64); 5777 5778 MVT IntVT = MVT::getIntegerVT(VecSize); 5779 5780 // If Vec is just a SCALAR_TO_VECTOR, then use the scalar integer directly.
5781 SDValue VecBC = peekThroughBitcasts(Vec); 5782 if (VecBC.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5783 SDValue Src = VecBC.getOperand(0); 5784 Src = DAG.getBitcast(Src.getValueType().changeTypeToInteger(), Src); 5785 Vec = DAG.getAnyExtOrTrunc(Src, SL, IntVT); 5786 } 5787 5788 unsigned EltSize = EltVT.getSizeInBits(); 5789 assert(isPowerOf2_32(EltSize)); 5790 5791 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); 5792 5793 // Convert vector index to bit-index (* EltSize) 5794 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); 5795 5796 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); 5797 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx); 5798 5799 if (ResultVT == MVT::f16) { 5800 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt); 5801 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); 5802 } 5803 5804 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT); 5805 } 5806 5807 static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) { 5808 assert(Elt % 2 == 0); 5809 return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0); 5810 } 5811 5812 SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, 5813 SelectionDAG &DAG) const { 5814 SDLoc SL(Op); 5815 EVT ResultVT = Op.getValueType(); 5816 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 5817 5818 EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16; 5819 EVT EltVT = PackVT.getVectorElementType(); 5820 int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements(); 5821 5822 // vector_shuffle <0,1,6,7> lhs, rhs 5823 // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2) 5824 // 5825 // vector_shuffle <6,7,2,3> lhs, rhs 5826 // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2) 5827 // 5828 // vector_shuffle <6,7,0,1> lhs, rhs 5829 // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0) 5830 5831 // Avoid scalarizing when both halves are reading from consecutive elements. 5832 SmallVector<SDValue, 4> Pieces; 5833 for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) { 5834 if (elementPairIsContiguous(SVN->getMask(), I)) { 5835 const int Idx = SVN->getMaskElt(I); 5836 int VecIdx = Idx < SrcNumElts ? 0 : 1; 5837 int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts; 5838 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, 5839 PackVT, SVN->getOperand(VecIdx), 5840 DAG.getConstant(EltIdx, SL, MVT::i32)); 5841 Pieces.push_back(SubVec); 5842 } else { 5843 const int Idx0 = SVN->getMaskElt(I); 5844 const int Idx1 = SVN->getMaskElt(I + 1); 5845 int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1; 5846 int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1; 5847 int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts; 5848 int EltIdx1 = Idx1 < SrcNumElts ? 
Idx1 : Idx1 - SrcNumElts; 5849 5850 SDValue Vec0 = SVN->getOperand(VecIdx0); 5851 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 5852 Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32)); 5853 5854 SDValue Vec1 = SVN->getOperand(VecIdx1); 5855 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 5856 Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32)); 5857 Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 })); 5858 } 5859 } 5860 5861 return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces); 5862 } 5863 5864 SDValue SITargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op, 5865 SelectionDAG &DAG) const { 5866 SDValue SVal = Op.getOperand(0); 5867 EVT ResultVT = Op.getValueType(); 5868 EVT SValVT = SVal.getValueType(); 5869 SDValue UndefVal = DAG.getUNDEF(SValVT); 5870 SDLoc SL(Op); 5871 5872 SmallVector<SDValue, 8> VElts; 5873 VElts.push_back(SVal); 5874 for (int I = 1, E = ResultVT.getVectorNumElements(); I < E; ++I) 5875 VElts.push_back(UndefVal); 5876 5877 return DAG.getBuildVector(ResultVT, SL, VElts); 5878 } 5879 5880 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op, 5881 SelectionDAG &DAG) const { 5882 SDLoc SL(Op); 5883 EVT VT = Op.getValueType(); 5884 5885 if (VT == MVT::v4i16 || VT == MVT::v4f16 || 5886 VT == MVT::v8i16 || VT == MVT::v8f16) { 5887 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 5888 VT.getVectorNumElements() / 2); 5889 MVT HalfIntVT = MVT::getIntegerVT(HalfVT.getSizeInBits()); 5890 5891 // Turn into pair of packed build_vectors. 5892 // TODO: Special case for constants that can be materialized with s_mov_b64. 5893 SmallVector<SDValue, 4> LoOps, HiOps; 5894 for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I != E; ++I) { 5895 LoOps.push_back(Op.getOperand(I)); 5896 HiOps.push_back(Op.getOperand(I + E)); 5897 } 5898 SDValue Lo = DAG.getBuildVector(HalfVT, SL, LoOps); 5899 SDValue Hi = DAG.getBuildVector(HalfVT, SL, HiOps); 5900 5901 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, HalfIntVT, Lo); 5902 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, HalfIntVT, Hi); 5903 5904 SDValue Blend = DAG.getBuildVector(MVT::getVectorVT(HalfIntVT, 2), SL, 5905 { CastLo, CastHi }); 5906 return DAG.getNode(ISD::BITCAST, SL, VT, Blend); 5907 } 5908 5909 if (VT == MVT::v16i16 || VT == MVT::v16f16) { 5910 EVT QuarterVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 5911 VT.getVectorNumElements() / 4); 5912 MVT QuarterIntVT = MVT::getIntegerVT(QuarterVT.getSizeInBits()); 5913 5914 SmallVector<SDValue, 4> Parts[4]; 5915 for (unsigned I = 0, E = VT.getVectorNumElements() / 4; I != E; ++I) { 5916 for (unsigned P = 0; P < 4; ++P) 5917 Parts[P].push_back(Op.getOperand(I + P * E)); 5918 } 5919 SDValue Casts[4]; 5920 for (unsigned P = 0; P < 4; ++P) { 5921 SDValue Vec = DAG.getBuildVector(QuarterVT, SL, Parts[P]); 5922 Casts[P] = DAG.getNode(ISD::BITCAST, SL, QuarterIntVT, Vec); 5923 } 5924 5925 SDValue Blend = 5926 DAG.getBuildVector(MVT::getVectorVT(QuarterIntVT, 4), SL, Casts); 5927 return DAG.getNode(ISD::BITCAST, SL, VT, Blend); 5928 } 5929 5930 assert(VT == MVT::v2f16 || VT == MVT::v2i16); 5931 assert(!Subtarget->hasVOP3PInsts() && "this should be legal"); 5932 5933 SDValue Lo = Op.getOperand(0); 5934 SDValue Hi = Op.getOperand(1); 5935 5936 // Avoid adding defined bits with the zero_extend. 
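// If the high element is undef, only the low 16 bits matter; an any_extend leaves the upper half of the i32 unspecified instead of forcing it to zero.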
5937 if (Hi.isUndef()) { 5938 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); 5939 SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo); 5940 return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo); 5941 } 5942 5943 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi); 5944 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi); 5945 5946 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi, 5947 DAG.getConstant(16, SL, MVT::i32)); 5948 if (Lo.isUndef()) 5949 return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi); 5950 5951 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); 5952 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo); 5953 5954 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi); 5955 return DAG.getNode(ISD::BITCAST, SL, VT, Or); 5956 } 5957 5958 bool 5959 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 5960 // We can fold offsets for anything that doesn't require a GOT relocation. 5961 return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || 5962 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || 5963 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && 5964 !shouldEmitGOTReloc(GA->getGlobal()); 5965 } 5966 5967 static SDValue 5968 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, 5969 const SDLoc &DL, int64_t Offset, EVT PtrVT, 5970 unsigned GAFlags = SIInstrInfo::MO_NONE) { 5971 assert(isInt<32>(Offset + 4) && "32-bit offset is expected!"); 5972 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is 5973 // lowered to the following code sequence: 5974 // 5975 // For constant address space: 5976 // s_getpc_b64 s[0:1] 5977 // s_add_u32 s0, s0, $symbol 5978 // s_addc_u32 s1, s1, 0 5979 // 5980 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 5981 // a fixup or relocation is emitted to replace $symbol with a literal 5982 // constant, which is a pc-relative offset from the encoding of the $symbol 5983 // operand to the global variable. 5984 // 5985 // For global address space: 5986 // s_getpc_b64 s[0:1] 5987 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo 5988 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi 5989 // 5990 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 5991 // fixups or relocations are emitted to replace $symbol@*@lo and 5992 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, 5993 // which is a 64-bit pc-relative offset from the encoding of the $symbol 5994 // operand to the global variable. 5995 // 5996 // What we want here is an offset from the value returned by s_getpc 5997 // (which is the address of the s_add_u32 instruction) to the global 5998 // variable, but since the encoding of $symbol starts 4 bytes after the start 5999 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too 6000 // small. This requires us to add 4 to the global variable offset in order to 6001 // compute the correct address. Similarly for the s_addc_u32 instruction, the 6002 // encoding of $symbol starts 12 bytes after the start of the s_add_u32 6003 // instruction. 
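// Hence the Offset + 4 on the low half and Offset + 12 on the high half below.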
6004 SDValue PtrLo = 6005 DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags); 6006 SDValue PtrHi; 6007 if (GAFlags == SIInstrInfo::MO_NONE) { 6008 PtrHi = DAG.getTargetConstant(0, DL, MVT::i32); 6009 } else { 6010 PtrHi = 6011 DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 12, GAFlags + 1); 6012 } 6013 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); 6014 } 6015 6016 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, 6017 SDValue Op, 6018 SelectionDAG &DAG) const { 6019 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); 6020 SDLoc DL(GSD); 6021 EVT PtrVT = Op.getValueType(); 6022 6023 const GlobalValue *GV = GSD->getGlobal(); 6024 if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && 6025 shouldUseLDSConstAddress(GV)) || 6026 GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS || 6027 GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { 6028 if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && 6029 GV->hasExternalLinkage()) { 6030 Type *Ty = GV->getValueType(); 6031 // HIP uses an unsized array `extern __shared__ T s[]` or a similar 6032 // zero-sized type in other languages to declare the dynamic shared 6033 // memory whose size is not known at compile time. Such arrays are 6034 // allocated by the runtime and placed directly after the statically 6035 // allocated ones. They all share the same offset. 6036 if (DAG.getDataLayout().getTypeAllocSize(Ty).isZero()) { 6037 assert(PtrVT == MVT::i32 && "32-bit pointer is expected."); 6038 // Adjust alignment for that dynamic shared memory array. 6039 MFI->setDynLDSAlign(DAG.getDataLayout(), *cast<GlobalVariable>(GV)); 6040 return SDValue( 6041 DAG.getMachineNode(AMDGPU::GET_GROUPSTATICSIZE, DL, PtrVT), 0); 6042 } 6043 } 6044 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); 6045 } 6046 6047 if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { 6048 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(), 6049 SIInstrInfo::MO_ABS32_LO); 6050 return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA); 6051 } 6052 6053 if (shouldEmitFixup(GV)) 6054 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); 6055 else if (shouldEmitPCReloc(GV)) 6056 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, 6057 SIInstrInfo::MO_REL32); 6058 6059 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, 6060 SIInstrInfo::MO_GOTPCREL32); 6061 6062 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); 6063 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); 6064 const DataLayout &DataLayout = DAG.getDataLayout(); 6065 Align Alignment = DataLayout.getABITypeAlign(PtrTy); 6066 MachinePointerInfo PtrInfo 6067 = MachinePointerInfo::getGOT(DAG.getMachineFunction()); 6068 6069 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Alignment, 6070 MachineMemOperand::MODereferenceable | 6071 MachineMemOperand::MOInvariant); 6072 } 6073 6074 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, 6075 const SDLoc &DL, SDValue V) const { 6076 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as 6077 // the destination register. 6078 // 6079 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, 6080 // so we will end up with redundant moves to m0. 6081 // 6082 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 6083 6084 // A Null SDValue creates a glue result.
6085 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 6086 V, Chain); 6087 return SDValue(M0, 0); 6088 } 6089 6090 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 6091 SDValue Op, 6092 MVT VT, 6093 unsigned Offset) const { 6094 SDLoc SL(Op); 6095 SDValue Param = lowerKernargMemParameter( 6096 DAG, MVT::i32, MVT::i32, SL, DAG.getEntryNode(), Offset, Align(4), false); 6097 // The local size values will have the high 16 bits as zero. 6098 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, 6099 DAG.getValueType(VT)); 6100 } 6101 6102 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 6103 EVT VT) { 6104 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), 6105 "non-hsa intrinsic with hsa target", 6106 DL.getDebugLoc()); 6107 DAG.getContext()->diagnose(BadIntrin); 6108 return DAG.getUNDEF(VT); 6109 } 6110 6111 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 6112 EVT VT) { 6113 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), 6114 "intrinsic not supported on subtarget", 6115 DL.getDebugLoc()); 6116 DAG.getContext()->diagnose(BadIntrin); 6117 return DAG.getUNDEF(VT); 6118 } 6119 6120 static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL, 6121 ArrayRef<SDValue> Elts) { 6122 assert(!Elts.empty()); 6123 MVT Type; 6124 unsigned NumElts = Elts.size(); 6125 6126 if (NumElts <= 8) { 6127 Type = MVT::getVectorVT(MVT::f32, NumElts); 6128 } else { 6129 assert(Elts.size() <= 16); 6130 Type = MVT::v16f32; 6131 NumElts = 16; 6132 } 6133 6134 SmallVector<SDValue, 16> VecElts(NumElts); 6135 for (unsigned i = 0; i < Elts.size(); ++i) { 6136 SDValue Elt = Elts[i]; 6137 if (Elt.getValueType() != MVT::f32) 6138 Elt = DAG.getBitcast(MVT::f32, Elt); 6139 VecElts[i] = Elt; 6140 } 6141 for (unsigned i = Elts.size(); i < NumElts; ++i) 6142 VecElts[i] = DAG.getUNDEF(MVT::f32); 6143 6144 if (NumElts == 1) 6145 return VecElts[0]; 6146 return DAG.getBuildVector(Type, DL, VecElts); 6147 } 6148 6149 static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT, 6150 SDValue Src, int ExtraElts) { 6151 EVT SrcVT = Src.getValueType(); 6152 6153 SmallVector<SDValue, 8> Elts; 6154 6155 if (SrcVT.isVector()) 6156 DAG.ExtractVectorElements(Src, Elts); 6157 else 6158 Elts.push_back(Src); 6159 6160 SDValue Undef = DAG.getUNDEF(SrcVT.getScalarType()); 6161 while (ExtraElts--) 6162 Elts.push_back(Undef); 6163 6164 return DAG.getBuildVector(CastVT, DL, Elts); 6165 } 6166 6167 // Reconstruct the required return value for an image load intrinsic. 6168 // This is more complicated due to the optional use of TexFailCtrl, which means the required 6169 // return type is an aggregate. 6170 static SDValue constructRetValue(SelectionDAG &DAG, 6171 MachineSDNode *Result, 6172 ArrayRef<EVT> ResultTypes, 6173 bool IsTexFail, bool Unpacked, bool IsD16, 6174 int DMaskPop, int NumVDataDwords, 6175 const SDLoc &DL) { 6176 // Determine the required return type. This is the same regardless of the IsTexFail flag. 6177 EVT ReqRetVT = ResultTypes[0]; 6178 int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1; 6179 int NumDataDwords = (!IsD16 || (IsD16 && Unpacked)) ? 6180 ReqRetNumElts : (ReqRetNumElts + 1) / 2; 6181 6182 int MaskPopDwords = (!IsD16 || (IsD16 && Unpacked)) ? 6183 DMaskPop : (DMaskPop + 1) / 2; 6184 6185 MVT DataDwordVT = NumDataDwords == 1 ? 6186 MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords); 6187 6188 MVT MaskPopVT = MaskPopDwords == 1 ?
6189 MVT::i32 : MVT::getVectorVT(MVT::i32, MaskPopDwords); 6190 6191 SDValue Data(Result, 0); 6192 SDValue TexFail; 6193 6194 if (DMaskPop > 0 && Data.getValueType() != MaskPopVT) { 6195 SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i32); 6196 if (MaskPopVT.isVector()) { 6197 Data = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MaskPopVT, 6198 SDValue(Result, 0), ZeroIdx); 6199 } else { 6200 Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MaskPopVT, 6201 SDValue(Result, 0), ZeroIdx); 6202 } 6203 } 6204 6205 if (DataDwordVT.isVector()) 6206 Data = padEltsToUndef(DAG, DL, DataDwordVT, Data, 6207 NumDataDwords - MaskPopDwords); 6208 6209 if (IsD16) 6210 Data = adjustLoadValueTypeImpl(Data, ReqRetVT, DL, DAG, Unpacked); 6211 6212 EVT LegalReqRetVT = ReqRetVT; 6213 if (!ReqRetVT.isVector()) { 6214 if (!Data.getValueType().isInteger()) 6215 Data = DAG.getNode(ISD::BITCAST, DL, 6216 Data.getValueType().changeTypeToInteger(), Data); 6217 Data = DAG.getNode(ISD::TRUNCATE, DL, ReqRetVT.changeTypeToInteger(), Data); 6218 } else { 6219 // We need to widen the return vector to a legal type 6220 if ((ReqRetVT.getVectorNumElements() % 2) == 1 && 6221 ReqRetVT.getVectorElementType().getSizeInBits() == 16) { 6222 LegalReqRetVT = 6223 EVT::getVectorVT(*DAG.getContext(), ReqRetVT.getVectorElementType(), 6224 ReqRetVT.getVectorNumElements() + 1); 6225 } 6226 } 6227 Data = DAG.getNode(ISD::BITCAST, DL, LegalReqRetVT, Data); 6228 6229 if (IsTexFail) { 6230 TexFail = 6231 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, SDValue(Result, 0), 6232 DAG.getConstant(MaskPopDwords, DL, MVT::i32)); 6233 6234 return DAG.getMergeValues({Data, TexFail, SDValue(Result, 1)}, DL); 6235 } 6236 6237 if (Result->getNumValues() == 1) 6238 return Data; 6239 6240 return DAG.getMergeValues({Data, SDValue(Result, 1)}, DL); 6241 } 6242 6243 static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE, 6244 SDValue *LWE, bool &IsTexFail) { 6245 auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode()); 6246 6247 uint64_t Value = TexFailCtrlConst->getZExtValue(); 6248 if (Value) { 6249 IsTexFail = true; 6250 } 6251 6252 SDLoc DL(TexFailCtrlConst); 6253 *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32); 6254 Value &= ~(uint64_t)0x1; 6255 *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32); 6256 Value &= ~(uint64_t)0x2; 6257 6258 return Value == 0; 6259 } 6260 6261 static void packImage16bitOpsToDwords(SelectionDAG &DAG, SDValue Op, 6262 MVT PackVectorVT, 6263 SmallVectorImpl<SDValue> &PackedAddrs, 6264 unsigned DimIdx, unsigned EndIdx, 6265 unsigned NumGradients) { 6266 SDLoc DL(Op); 6267 for (unsigned I = DimIdx; I < EndIdx; I++) { 6268 SDValue Addr = Op.getOperand(I); 6269 6270 // Gradients are packed with undef for each coordinate. 
6271 // In <hi 16 bit>,<lo 16 bit> notation, the registers look like this: 6272 // 1D: undef,dx/dh; undef,dx/dv 6273 // 2D: dy/dh,dx/dh; dy/dv,dx/dv 6274 // 3D: dy/dh,dx/dh; undef,dz/dh; dy/dv,dx/dv; undef,dz/dv 6275 if (((I + 1) >= EndIdx) || 6276 ((NumGradients / 2) % 2 == 1 && (I == DimIdx + (NumGradients / 2) - 1 || 6277 I == DimIdx + NumGradients - 1))) { 6278 if (Addr.getValueType() != MVT::i16) 6279 Addr = DAG.getBitcast(MVT::i16, Addr); 6280 Addr = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Addr); 6281 } else { 6282 Addr = DAG.getBuildVector(PackVectorVT, DL, {Addr, Op.getOperand(I + 1)}); 6283 I++; 6284 } 6285 Addr = DAG.getBitcast(MVT::f32, Addr); 6286 PackedAddrs.push_back(Addr); 6287 } 6288 } 6289 6290 SDValue SITargetLowering::lowerImage(SDValue Op, 6291 const AMDGPU::ImageDimIntrinsicInfo *Intr, 6292 SelectionDAG &DAG, bool WithChain) const { 6293 SDLoc DL(Op); 6294 MachineFunction &MF = DAG.getMachineFunction(); 6295 const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>(); 6296 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 6297 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); 6298 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); 6299 unsigned IntrOpcode = Intr->BaseOpcode; 6300 bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget); 6301 bool IsGFX11Plus = AMDGPU::isGFX11Plus(*Subtarget); 6302 6303 SmallVector<EVT, 3> ResultTypes(Op->values()); 6304 SmallVector<EVT, 3> OrigResultTypes(Op->values()); 6305 bool IsD16 = false; 6306 bool IsG16 = false; 6307 bool IsA16 = false; 6308 SDValue VData; 6309 int NumVDataDwords; 6310 bool AdjustRetType = false; 6311 6312 // Offset of intrinsic arguments 6313 const unsigned ArgOffset = WithChain ? 2 : 1; 6314 6315 unsigned DMask; 6316 unsigned DMaskLanes = 0; 6317 6318 if (BaseOpcode->Atomic) { 6319 VData = Op.getOperand(2); 6320 6321 bool Is64Bit = VData.getValueType() == MVT::i64; 6322 if (BaseOpcode->AtomicX2) { 6323 SDValue VData2 = Op.getOperand(3); 6324 VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL, 6325 {VData, VData2}); 6326 if (Is64Bit) 6327 VData = DAG.getBitcast(MVT::v4i32, VData); 6328 6329 ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32; 6330 DMask = Is64Bit ? 0xf : 0x3; 6331 NumVDataDwords = Is64Bit ? 4 : 2; 6332 } else { 6333 DMask = Is64Bit ? 0x3 : 0x1; 6334 NumVDataDwords = Is64Bit ? 2 : 1; 6335 } 6336 } else { 6337 auto *DMaskConst = 6338 cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->DMaskIndex)); 6339 DMask = DMaskConst->getZExtValue(); 6340 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); 6341 6342 if (BaseOpcode->Store) { 6343 VData = Op.getOperand(2); 6344 6345 MVT StoreVT = VData.getSimpleValueType(); 6346 if (StoreVT.getScalarType() == MVT::f16) { 6347 if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) 6348 return Op; // D16 is unsupported for this instruction 6349 6350 IsD16 = true; 6351 VData = handleD16VData(VData, DAG, true); 6352 } 6353 6354 NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32; 6355 } else { 6356 // Work out the num dwords based on the dmask popcount and underlying type 6357 // and whether packing is supported. 
6358 MVT LoadVT = ResultTypes[0].getSimpleVT(); 6359 if (LoadVT.getScalarType() == MVT::f16) { 6360 if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) 6361 return Op; // D16 is unsupported for this instruction 6362 6363 IsD16 = true; 6364 } 6365 6366 // Confirm that the return type is large enough for the dmask specified 6367 if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) || 6368 (!LoadVT.isVector() && DMaskLanes > 1)) 6369 return Op; 6370 6371 // The sq block of gfx8 and gfx9 do not estimate register use correctly 6372 // for d16 image_gather4, image_gather4_l, and image_gather4_lz 6373 // instructions. 6374 if (IsD16 && !Subtarget->hasUnpackedD16VMem() && 6375 !(BaseOpcode->Gather4 && Subtarget->hasImageGather4D16Bug())) 6376 NumVDataDwords = (DMaskLanes + 1) / 2; 6377 else 6378 NumVDataDwords = DMaskLanes; 6379 6380 AdjustRetType = true; 6381 } 6382 } 6383 6384 unsigned VAddrEnd = ArgOffset + Intr->VAddrEnd; 6385 SmallVector<SDValue, 4> VAddrs; 6386 6387 // Check for 16 bit addresses or derivatives and pack if true. 6388 MVT VAddrVT = 6389 Op.getOperand(ArgOffset + Intr->GradientStart).getSimpleValueType(); 6390 MVT VAddrScalarVT = VAddrVT.getScalarType(); 6391 MVT GradPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16; 6392 IsG16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16; 6393 6394 VAddrVT = Op.getOperand(ArgOffset + Intr->CoordStart).getSimpleValueType(); 6395 VAddrScalarVT = VAddrVT.getScalarType(); 6396 MVT AddrPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16; 6397 IsA16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16; 6398 6399 // Push back extra arguments. 6400 for (unsigned I = Intr->VAddrStart; I < Intr->GradientStart; I++) { 6401 if (IsA16 && (Op.getOperand(ArgOffset + I).getValueType() == MVT::f16)) { 6402 assert(I == Intr->BiasIndex && "Got unexpected 16-bit extra argument"); 6403 // Special handling of bias when A16 is on. Bias is of type half but 6404 // occupies full 32-bit. 
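// The v2f16 built here keeps the bias in element 0 with an undef second
// element, so the 16-bit bias still takes up a full address dword.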
6405 SDValue Bias = DAG.getBuildVector( 6406 MVT::v2f16, DL, 6407 {Op.getOperand(ArgOffset + I), DAG.getUNDEF(MVT::f16)}); 6408 VAddrs.push_back(Bias); 6409 } else { 6410 assert((!IsA16 || Intr->NumBiasArgs == 0 || I != Intr->BiasIndex) && 6411 "Bias needs to be converted to 16 bit in A16 mode"); 6412 VAddrs.push_back(Op.getOperand(ArgOffset + I)); 6413 } 6414 } 6415 6416 if (BaseOpcode->Gradients && !ST->hasG16() && (IsA16 != IsG16)) { 6417 // 16 bit gradients are supported, but are tied to the A16 control 6418 // so both gradients and addresses must be 16 bit 6419 LLVM_DEBUG( 6420 dbgs() << "Failed to lower image intrinsic: 16 bit addresses " 6421 "require 16 bit args for both gradients and addresses"); 6422 return Op; 6423 } 6424 6425 if (IsA16) { 6426 if (!ST->hasA16()) { 6427 LLVM_DEBUG(dbgs() << "Failed to lower image intrinsic: Target does not " 6428 "support 16 bit addresses\n"); 6429 return Op; 6430 } 6431 } 6432 6433 // We've dealt with incorrect input so we know that if IsA16, IsG16 6434 // are set then we have to compress/pack operands (either address, 6435 // gradient or both) 6436 // In the case where a16 and gradients are tied (no G16 support) then we 6437 // have already verified that both IsA16 and IsG16 are true 6438 if (BaseOpcode->Gradients && IsG16 && ST->hasG16()) { 6439 // Activate g16 6440 const AMDGPU::MIMGG16MappingInfo *G16MappingInfo = 6441 AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode); 6442 IntrOpcode = G16MappingInfo->G16; // set new opcode to variant with _g16 6443 } 6444 6445 // Add gradients (packed or unpacked) 6446 if (IsG16) { 6447 // Pack the gradients 6448 // const int PackEndIdx = IsA16 ? VAddrEnd : (ArgOffset + Intr->CoordStart); 6449 packImage16bitOpsToDwords(DAG, Op, GradPackVectorVT, VAddrs, 6450 ArgOffset + Intr->GradientStart, 6451 ArgOffset + Intr->CoordStart, Intr->NumGradients); 6452 } else { 6453 for (unsigned I = ArgOffset + Intr->GradientStart; 6454 I < ArgOffset + Intr->CoordStart; I++) 6455 VAddrs.push_back(Op.getOperand(I)); 6456 } 6457 6458 // Add addresses (packed or unpacked) 6459 if (IsA16) { 6460 packImage16bitOpsToDwords(DAG, Op, AddrPackVectorVT, VAddrs, 6461 ArgOffset + Intr->CoordStart, VAddrEnd, 6462 0 /* No gradients */); 6463 } else { 6464 // Add uncompressed address 6465 for (unsigned I = ArgOffset + Intr->CoordStart; I < VAddrEnd; I++) 6466 VAddrs.push_back(Op.getOperand(I)); 6467 } 6468 6469 // If the register allocator cannot place the address registers contiguously 6470 // without introducing moves, then using the non-sequential address encoding 6471 // is always preferable, since it saves VALU instructions and is usually a 6472 // wash in terms of code size or even better. 6473 // 6474 // However, we currently have no way of hinting to the register allocator that 6475 // MIMG addresses should be placed contiguously when it is possible to do so, 6476 // so force non-NSA for the common 2-address case as a heuristic. 6477 // 6478 // SIShrinkInstructions will convert NSA encodings to non-NSA after register 6479 // allocation when possible. 6480 // 6481 // TODO: we can actually allow partial NSA where the final register is a 6482 // contiguous set of the remaining addresses. 6483 // This could help where there are more addresses than supported. 
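// For example: with only two address dwords the VAddrs.size() >= 3 check keeps
// UseNSA false and the addresses are merged into a single vector below; with
// three or more, NSA is used provided the count fits the target's NSA size
// limit.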
6484 bool UseNSA = ST->hasFeature(AMDGPU::FeatureNSAEncoding) && 6485 VAddrs.size() >= 3 && 6486 VAddrs.size() <= (unsigned)ST->getNSAMaxSize(); 6487 SDValue VAddr; 6488 if (!UseNSA) 6489 VAddr = getBuildDwordsVector(DAG, DL, VAddrs); 6490 6491 SDValue True = DAG.getTargetConstant(1, DL, MVT::i1); 6492 SDValue False = DAG.getTargetConstant(0, DL, MVT::i1); 6493 SDValue Unorm; 6494 if (!BaseOpcode->Sampler) { 6495 Unorm = True; 6496 } else { 6497 auto UnormConst = 6498 cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->UnormIndex)); 6499 6500 Unorm = UnormConst->getZExtValue() ? True : False; 6501 } 6502 6503 SDValue TFE; 6504 SDValue LWE; 6505 SDValue TexFail = Op.getOperand(ArgOffset + Intr->TexFailCtrlIndex); 6506 bool IsTexFail = false; 6507 if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail)) 6508 return Op; 6509 6510 if (IsTexFail) { 6511 if (!DMaskLanes) { 6512 // Expecting to get an error flag since TFC is on - and dmask is 0 6513 // Force dmask to be at least 1 otherwise the instruction will fail 6514 DMask = 0x1; 6515 DMaskLanes = 1; 6516 NumVDataDwords = 1; 6517 } 6518 NumVDataDwords += 1; 6519 AdjustRetType = true; 6520 } 6521 6522 // Has something earlier tagged that the return type needs adjusting 6523 // This happens if the instruction is a load or has set TexFailCtrl flags 6524 if (AdjustRetType) { 6525 // NumVDataDwords reflects the true number of dwords required in the return type 6526 if (DMaskLanes == 0 && !BaseOpcode->Store) { 6527 // This is a no-op load. This can be eliminated 6528 SDValue Undef = DAG.getUNDEF(Op.getValueType()); 6529 if (isa<MemSDNode>(Op)) 6530 return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL); 6531 return Undef; 6532 } 6533 6534 EVT NewVT = NumVDataDwords > 1 ? 6535 EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumVDataDwords) 6536 : MVT::i32; 6537 6538 ResultTypes[0] = NewVT; 6539 if (ResultTypes.size() == 3) { 6540 // Original result was aggregate type used for TexFailCtrl results 6541 // The actual instruction returns as a vector type which has now been 6542 // created. Remove the aggregate result. 6543 ResultTypes.erase(&ResultTypes[1]); 6544 } 6545 } 6546 6547 unsigned CPol = cast<ConstantSDNode>( 6548 Op.getOperand(ArgOffset + Intr->CachePolicyIndex))->getZExtValue(); 6549 if (BaseOpcode->Atomic) 6550 CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization 6551 if (CPol & ~AMDGPU::CPol::ALL) 6552 return Op; 6553 6554 SmallVector<SDValue, 26> Ops; 6555 if (BaseOpcode->Store || BaseOpcode->Atomic) 6556 Ops.push_back(VData); // vdata 6557 if (UseNSA) 6558 append_range(Ops, VAddrs); 6559 else 6560 Ops.push_back(VAddr); 6561 Ops.push_back(Op.getOperand(ArgOffset + Intr->RsrcIndex)); 6562 if (BaseOpcode->Sampler) 6563 Ops.push_back(Op.getOperand(ArgOffset + Intr->SampIndex)); 6564 Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32)); 6565 if (IsGFX10Plus) 6566 Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32)); 6567 Ops.push_back(Unorm); 6568 Ops.push_back(DAG.getTargetConstant(CPol, DL, MVT::i32)); 6569 Ops.push_back(IsA16 && // r128, a16 for gfx9 6570 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False); 6571 if (IsGFX10Plus) 6572 Ops.push_back(IsA16 ? True : False); 6573 if (!Subtarget->hasGFX90AInsts()) { 6574 Ops.push_back(TFE); //tfe 6575 } else if (cast<ConstantSDNode>(TFE)->getZExtValue()) { 6576 report_fatal_error("TFE is not supported on this GPU"); 6577 } 6578 Ops.push_back(LWE); // lwe 6579 if (!IsGFX10Plus) 6580 Ops.push_back(DimInfo->DA ? 
True : False); 6581 if (BaseOpcode->HasD16) 6582 Ops.push_back(IsD16 ? True : False); 6583 if (isa<MemSDNode>(Op)) 6584 Ops.push_back(Op.getOperand(0)); // chain 6585 6586 int NumVAddrDwords = 6587 UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32; 6588 int Opcode = -1; 6589 6590 if (IsGFX11Plus) { 6591 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, 6592 UseNSA ? AMDGPU::MIMGEncGfx11NSA 6593 : AMDGPU::MIMGEncGfx11Default, 6594 NumVDataDwords, NumVAddrDwords); 6595 } else if (IsGFX10Plus) { 6596 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, 6597 UseNSA ? AMDGPU::MIMGEncGfx10NSA 6598 : AMDGPU::MIMGEncGfx10Default, 6599 NumVDataDwords, NumVAddrDwords); 6600 } else { 6601 if (Subtarget->hasGFX90AInsts()) { 6602 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a, 6603 NumVDataDwords, NumVAddrDwords); 6604 if (Opcode == -1) 6605 report_fatal_error( 6606 "requested image instruction is not supported on this GPU"); 6607 } 6608 if (Opcode == -1 && 6609 Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 6610 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, 6611 NumVDataDwords, NumVAddrDwords); 6612 if (Opcode == -1) 6613 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, 6614 NumVDataDwords, NumVAddrDwords); 6615 } 6616 assert(Opcode != -1); 6617 6618 MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops); 6619 if (auto MemOp = dyn_cast<MemSDNode>(Op)) { 6620 MachineMemOperand *MemRef = MemOp->getMemOperand(); 6621 DAG.setNodeMemRefs(NewNode, {MemRef}); 6622 } 6623 6624 if (BaseOpcode->AtomicX2) { 6625 SmallVector<SDValue, 1> Elt; 6626 DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1); 6627 return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL); 6628 } 6629 if (BaseOpcode->Store) 6630 return SDValue(NewNode, 0); 6631 return constructRetValue(DAG, NewNode, 6632 OrigResultTypes, IsTexFail, 6633 Subtarget->hasUnpackedD16VMem(), IsD16, 6634 DMaskLanes, NumVDataDwords, DL); 6635 } 6636 6637 SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, 6638 SDValue Offset, SDValue CachePolicy, 6639 SelectionDAG &DAG) const { 6640 MachineFunction &MF = DAG.getMachineFunction(); 6641 6642 const DataLayout &DataLayout = DAG.getDataLayout(); 6643 Align Alignment = 6644 DataLayout.getABITypeAlign(VT.getTypeForEVT(*DAG.getContext())); 6645 6646 MachineMemOperand *MMO = MF.getMachineMemOperand( 6647 MachinePointerInfo(), 6648 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | 6649 MachineMemOperand::MOInvariant, 6650 VT.getStoreSize(), Alignment); 6651 6652 if (!Offset->isDivergent()) { 6653 SDValue Ops[] = { 6654 Rsrc, 6655 Offset, // Offset 6656 CachePolicy 6657 }; 6658 6659 // Widen vec3 load to vec4. 6660 if (VT.isVector() && VT.getVectorNumElements() == 3) { 6661 EVT WidenedVT = 6662 EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4); 6663 auto WidenedOp = DAG.getMemIntrinsicNode( 6664 AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT, 6665 MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize())); 6666 auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp, 6667 DAG.getVectorIdxConstant(0, DL)); 6668 return Subvector; 6669 } 6670 6671 return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL, 6672 DAG.getVTList(VT), Ops, VT, MMO); 6673 } 6674 6675 // We have a divergent offset. Emit a MUBUF buffer load instead. We can 6676 // assume that the buffer is unswizzled. 
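// The expansion below issues one buffer load per four dwords: v8/v16 results
// are split into two or four dwordx4 loads at consecutive 16-byte offsets and
// then re-concatenated.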
6677 SmallVector<SDValue, 4> Loads; 6678 unsigned NumLoads = 1; 6679 MVT LoadVT = VT.getSimpleVT(); 6680 unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1; 6681 assert((LoadVT.getScalarType() == MVT::i32 || 6682 LoadVT.getScalarType() == MVT::f32)); 6683 6684 if (NumElts == 8 || NumElts == 16) { 6685 NumLoads = NumElts / 4; 6686 LoadVT = MVT::getVectorVT(LoadVT.getScalarType(), 4); 6687 } 6688 6689 SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue}); 6690 SDValue Ops[] = { 6691 DAG.getEntryNode(), // Chain 6692 Rsrc, // rsrc 6693 DAG.getConstant(0, DL, MVT::i32), // vindex 6694 {}, // voffset 6695 {}, // soffset 6696 {}, // offset 6697 CachePolicy, // cachepolicy 6698 DAG.getTargetConstant(0, DL, MVT::i1), // idxen 6699 }; 6700 6701 // Use the alignment to ensure that the required offsets will fit into the 6702 // immediate offsets. 6703 setBufferOffsets(Offset, DAG, &Ops[3], 6704 NumLoads > 1 ? Align(16 * NumLoads) : Align(4)); 6705 6706 uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue(); 6707 for (unsigned i = 0; i < NumLoads; ++i) { 6708 Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32); 6709 Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops, 6710 LoadVT, MMO, DAG)); 6711 } 6712 6713 if (NumElts == 8 || NumElts == 16) 6714 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads); 6715 6716 return Loads[0]; 6717 } 6718 6719 SDValue SITargetLowering::lowerWorkitemID(SelectionDAG &DAG, SDValue Op, 6720 unsigned Dim, 6721 const ArgDescriptor &Arg) const { 6722 SDLoc SL(Op); 6723 MachineFunction &MF = DAG.getMachineFunction(); 6724 unsigned MaxID = Subtarget->getMaxWorkitemID(MF.getFunction(), Dim); 6725 if (MaxID == 0) 6726 return DAG.getConstant(0, SL, MVT::i32); 6727 6728 SDValue Val = loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 6729 SDLoc(DAG.getEntryNode()), Arg); 6730 6731 // Don't bother inserting AssertZext for packed IDs since we're emitting the 6732 // masking operations anyway. 6733 // 6734 // TODO: We could assert the top bit is 0 for the source copy. 6735 if (Arg.isMasked()) 6736 return Val; 6737 6738 // Preserve the known bits after expansion to a copy. 6739 EVT SmallVT = 6740 EVT::getIntegerVT(*DAG.getContext(), 32 - countLeadingZeros(MaxID)); 6741 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Val, 6742 DAG.getValueType(SmallVT)); 6743 } 6744 6745 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 6746 SelectionDAG &DAG) const { 6747 MachineFunction &MF = DAG.getMachineFunction(); 6748 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 6749 6750 EVT VT = Op.getValueType(); 6751 SDLoc DL(Op); 6752 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 6753 6754 // TODO: Should this propagate fast-math-flags? 6755 6756 switch (IntrinsicID) { 6757 case Intrinsic::amdgcn_implicit_buffer_ptr: { 6758 if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction())) 6759 return emitNonHSAIntrinsicError(DAG, DL, VT); 6760 return getPreloadedValue(DAG, *MFI, VT, 6761 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR); 6762 } 6763 case Intrinsic::amdgcn_dispatch_ptr: 6764 case Intrinsic::amdgcn_queue_ptr: { 6765 if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) { 6766 DiagnosticInfoUnsupported BadIntrin( 6767 MF.getFunction(), "unsupported hsa intrinsic without hsa target", 6768 DL.getDebugLoc()); 6769 DAG.getContext()->diagnose(BadIntrin); 6770 return DAG.getUNDEF(VT); 6771 } 6772 6773 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? 
6774 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR; 6775 return getPreloadedValue(DAG, *MFI, VT, RegID); 6776 } 6777 case Intrinsic::amdgcn_implicitarg_ptr: { 6778 if (MFI->isEntryFunction()) 6779 return getImplicitArgPtr(DAG, DL); 6780 return getPreloadedValue(DAG, *MFI, VT, 6781 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 6782 } 6783 case Intrinsic::amdgcn_kernarg_segment_ptr: { 6784 if (!AMDGPU::isKernel(MF.getFunction().getCallingConv())) { 6785 // This only makes sense to call in a kernel, so just lower to null. 6786 return DAG.getConstant(0, DL, VT); 6787 } 6788 6789 return getPreloadedValue(DAG, *MFI, VT, 6790 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 6791 } 6792 case Intrinsic::amdgcn_dispatch_id: { 6793 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID); 6794 } 6795 case Intrinsic::amdgcn_rcp: 6796 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); 6797 case Intrinsic::amdgcn_rsq: 6798 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 6799 case Intrinsic::amdgcn_rsq_legacy: 6800 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 6801 return emitRemovedIntrinsicError(DAG, DL, VT); 6802 return SDValue(); 6803 case Intrinsic::amdgcn_rcp_legacy: 6804 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 6805 return emitRemovedIntrinsicError(DAG, DL, VT); 6806 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); 6807 case Intrinsic::amdgcn_rsq_clamp: { 6808 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) 6809 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); 6810 6811 Type *Type = VT.getTypeForEVT(*DAG.getContext()); 6812 APFloat Max = APFloat::getLargest(Type->getFltSemantics()); 6813 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); 6814 6815 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 6816 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, 6817 DAG.getConstantFP(Max, DL, VT)); 6818 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, 6819 DAG.getConstantFP(Min, DL, VT)); 6820 } 6821 case Intrinsic::r600_read_ngroups_x: 6822 if (Subtarget->isAmdHsaOS()) 6823 return emitNonHSAIntrinsicError(DAG, DL, VT); 6824 6825 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 6826 SI::KernelInputOffsets::NGROUPS_X, Align(4), 6827 false); 6828 case Intrinsic::r600_read_ngroups_y: 6829 if (Subtarget->isAmdHsaOS()) 6830 return emitNonHSAIntrinsicError(DAG, DL, VT); 6831 6832 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 6833 SI::KernelInputOffsets::NGROUPS_Y, Align(4), 6834 false); 6835 case Intrinsic::r600_read_ngroups_z: 6836 if (Subtarget->isAmdHsaOS()) 6837 return emitNonHSAIntrinsicError(DAG, DL, VT); 6838 6839 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 6840 SI::KernelInputOffsets::NGROUPS_Z, Align(4), 6841 false); 6842 case Intrinsic::r600_read_global_size_x: 6843 if (Subtarget->isAmdHsaOS()) 6844 return emitNonHSAIntrinsicError(DAG, DL, VT); 6845 6846 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 6847 SI::KernelInputOffsets::GLOBAL_SIZE_X, 6848 Align(4), false); 6849 case Intrinsic::r600_read_global_size_y: 6850 if (Subtarget->isAmdHsaOS()) 6851 return emitNonHSAIntrinsicError(DAG, DL, VT); 6852 6853 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 6854 SI::KernelInputOffsets::GLOBAL_SIZE_Y, 6855 Align(4), false); 6856 case Intrinsic::r600_read_global_size_z: 6857 if 
(Subtarget->isAmdHsaOS()) 6858 return emitNonHSAIntrinsicError(DAG, DL, VT); 6859 6860 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 6861 SI::KernelInputOffsets::GLOBAL_SIZE_Z, 6862 Align(4), false); 6863 case Intrinsic::r600_read_local_size_x: 6864 if (Subtarget->isAmdHsaOS()) 6865 return emitNonHSAIntrinsicError(DAG, DL, VT); 6866 6867 return lowerImplicitZextParam(DAG, Op, MVT::i16, 6868 SI::KernelInputOffsets::LOCAL_SIZE_X); 6869 case Intrinsic::r600_read_local_size_y: 6870 if (Subtarget->isAmdHsaOS()) 6871 return emitNonHSAIntrinsicError(DAG, DL, VT); 6872 6873 return lowerImplicitZextParam(DAG, Op, MVT::i16, 6874 SI::KernelInputOffsets::LOCAL_SIZE_Y); 6875 case Intrinsic::r600_read_local_size_z: 6876 if (Subtarget->isAmdHsaOS()) 6877 return emitNonHSAIntrinsicError(DAG, DL, VT); 6878 6879 return lowerImplicitZextParam(DAG, Op, MVT::i16, 6880 SI::KernelInputOffsets::LOCAL_SIZE_Z); 6881 case Intrinsic::amdgcn_workgroup_id_x: 6882 return getPreloadedValue(DAG, *MFI, VT, 6883 AMDGPUFunctionArgInfo::WORKGROUP_ID_X); 6884 case Intrinsic::amdgcn_workgroup_id_y: 6885 return getPreloadedValue(DAG, *MFI, VT, 6886 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); 6887 case Intrinsic::amdgcn_workgroup_id_z: 6888 return getPreloadedValue(DAG, *MFI, VT, 6889 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); 6890 case Intrinsic::amdgcn_workitem_id_x: 6891 return lowerWorkitemID(DAG, Op, 0, MFI->getArgInfo().WorkItemIDX); 6892 case Intrinsic::amdgcn_workitem_id_y: 6893 return lowerWorkitemID(DAG, Op, 1, MFI->getArgInfo().WorkItemIDY); 6894 case Intrinsic::amdgcn_workitem_id_z: 6895 return lowerWorkitemID(DAG, Op, 2, MFI->getArgInfo().WorkItemIDZ); 6896 case Intrinsic::amdgcn_wavefrontsize: 6897 return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(), 6898 SDLoc(Op), MVT::i32); 6899 case Intrinsic::amdgcn_s_buffer_load: { 6900 unsigned CPol = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 6901 if (CPol & ~AMDGPU::CPol::ALL) 6902 return Op; 6903 return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 6904 DAG); 6905 } 6906 case Intrinsic::amdgcn_fdiv_fast: 6907 return lowerFDIV_FAST(Op, DAG); 6908 case Intrinsic::amdgcn_sin: 6909 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); 6910 6911 case Intrinsic::amdgcn_cos: 6912 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); 6913 6914 case Intrinsic::amdgcn_mul_u24: 6915 return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2)); 6916 case Intrinsic::amdgcn_mul_i24: 6917 return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2)); 6918 6919 case Intrinsic::amdgcn_log_clamp: { 6920 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) 6921 return SDValue(); 6922 6923 return emitRemovedIntrinsicError(DAG, DL, VT); 6924 } 6925 case Intrinsic::amdgcn_ldexp: 6926 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, 6927 Op.getOperand(1), Op.getOperand(2)); 6928 6929 case Intrinsic::amdgcn_fract: 6930 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); 6931 6932 case Intrinsic::amdgcn_class: 6933 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, 6934 Op.getOperand(1), Op.getOperand(2)); 6935 case Intrinsic::amdgcn_div_fmas: 6936 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, 6937 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 6938 Op.getOperand(4)); 6939 6940 case Intrinsic::amdgcn_div_fixup: 6941 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, 6942 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 
6943 6944 case Intrinsic::amdgcn_div_scale: { 6945 const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3)); 6946 6947 // Translate to the operands expected by the machine instruction. The 6948 // first parameter must be the same as the first instruction. 6949 SDValue Numerator = Op.getOperand(1); 6950 SDValue Denominator = Op.getOperand(2); 6951 6952 // Note this order is opposite of the machine instruction's operations, 6953 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The 6954 // intrinsic has the numerator as the first operand to match a normal 6955 // division operation. 6956 6957 SDValue Src0 = Param->isAllOnes() ? Numerator : Denominator; 6958 6959 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, 6960 Denominator, Numerator); 6961 } 6962 case Intrinsic::amdgcn_icmp: { 6963 // There is a Pat that handles this variant, so return it as-is. 6964 if (Op.getOperand(1).getValueType() == MVT::i1 && 6965 Op.getConstantOperandVal(2) == 0 && 6966 Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE) 6967 return Op; 6968 return lowerICMPIntrinsic(*this, Op.getNode(), DAG); 6969 } 6970 case Intrinsic::amdgcn_fcmp: { 6971 return lowerFCMPIntrinsic(*this, Op.getNode(), DAG); 6972 } 6973 case Intrinsic::amdgcn_ballot: 6974 return lowerBALLOTIntrinsic(*this, Op.getNode(), DAG); 6975 case Intrinsic::amdgcn_fmed3: 6976 return DAG.getNode(AMDGPUISD::FMED3, DL, VT, 6977 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 6978 case Intrinsic::amdgcn_fdot2: 6979 return DAG.getNode(AMDGPUISD::FDOT2, DL, VT, 6980 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 6981 Op.getOperand(4)); 6982 case Intrinsic::amdgcn_fmul_legacy: 6983 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, 6984 Op.getOperand(1), Op.getOperand(2)); 6985 case Intrinsic::amdgcn_sffbh: 6986 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); 6987 case Intrinsic::amdgcn_sbfe: 6988 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, 6989 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 6990 case Intrinsic::amdgcn_ubfe: 6991 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, 6992 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 6993 case Intrinsic::amdgcn_cvt_pkrtz: 6994 case Intrinsic::amdgcn_cvt_pknorm_i16: 6995 case Intrinsic::amdgcn_cvt_pknorm_u16: 6996 case Intrinsic::amdgcn_cvt_pk_i16: 6997 case Intrinsic::amdgcn_cvt_pk_u16: { 6998 // FIXME: Stop adding cast if v2f16/v2i16 are legal. 
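// When the packed result type is illegal the node is emitted with an i32
// result and bitcast back to the requested v2f16/v2i16 type; when it is legal
// the node is emitted with that type directly.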
6999 EVT VT = Op.getValueType(); 7000 unsigned Opcode; 7001 7002 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz) 7003 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32; 7004 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16) 7005 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; 7006 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16) 7007 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; 7008 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16) 7009 Opcode = AMDGPUISD::CVT_PK_I16_I32; 7010 else 7011 Opcode = AMDGPUISD::CVT_PK_U16_U32; 7012 7013 if (isTypeLegal(VT)) 7014 return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2)); 7015 7016 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32, 7017 Op.getOperand(1), Op.getOperand(2)); 7018 return DAG.getNode(ISD::BITCAST, DL, VT, Node); 7019 } 7020 case Intrinsic::amdgcn_fmad_ftz: 7021 return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1), 7022 Op.getOperand(2), Op.getOperand(3)); 7023 7024 case Intrinsic::amdgcn_if_break: 7025 return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT, 7026 Op->getOperand(1), Op->getOperand(2)), 0); 7027 7028 case Intrinsic::amdgcn_groupstaticsize: { 7029 Triple::OSType OS = getTargetMachine().getTargetTriple().getOS(); 7030 if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) 7031 return Op; 7032 7033 const Module *M = MF.getFunction().getParent(); 7034 const GlobalValue *GV = 7035 M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize)); 7036 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0, 7037 SIInstrInfo::MO_ABS32_LO); 7038 return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0}; 7039 } 7040 case Intrinsic::amdgcn_is_shared: 7041 case Intrinsic::amdgcn_is_private: { 7042 SDLoc SL(Op); 7043 unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ? 7044 AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS; 7045 SDValue Aperture = getSegmentAperture(AS, SL, DAG); 7046 SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, 7047 Op.getOperand(1)); 7048 7049 SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec, 7050 DAG.getConstant(1, SL, MVT::i32)); 7051 return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ); 7052 } 7053 case Intrinsic::amdgcn_perm: 7054 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, Op.getOperand(1), 7055 Op.getOperand(2), Op.getOperand(3)); 7056 case Intrinsic::amdgcn_reloc_constant: { 7057 Module *M = const_cast<Module *>(MF.getFunction().getParent()); 7058 const MDNode *Metadata = cast<MDNodeSDNode>(Op.getOperand(1))->getMD(); 7059 auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString(); 7060 auto RelocSymbol = cast<GlobalVariable>( 7061 M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext()))); 7062 SDValue GA = DAG.getTargetGlobalAddress(RelocSymbol, DL, MVT::i32, 0, 7063 SIInstrInfo::MO_ABS32_LO); 7064 return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0}; 7065 } 7066 default: 7067 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 7068 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) 7069 return lowerImage(Op, ImageDimIntr, DAG, false); 7070 7071 return Op; 7072 } 7073 } 7074 7075 /// Update \p MMO based on the offset inputs to an intrinsic. 
7076 static void updateBufferMMO(MachineMemOperand *MMO, SDValue VOffset, 7077 SDValue SOffset, SDValue Offset, 7078 SDValue VIndex = SDValue()) { 7079 if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) || 7080 !isa<ConstantSDNode>(Offset)) { 7081 // The combined offset is not known to be constant, so we cannot represent 7082 // it in the MMO. Give up. 7083 MMO->setValue((Value *)nullptr); 7084 return; 7085 } 7086 7087 if (VIndex && (!isa<ConstantSDNode>(VIndex) || 7088 !cast<ConstantSDNode>(VIndex)->isZero())) { 7089 // The strided index component of the address is not known to be zero, so we 7090 // cannot represent it in the MMO. Give up. 7091 MMO->setValue((Value *)nullptr); 7092 return; 7093 } 7094 7095 MMO->setOffset(cast<ConstantSDNode>(VOffset)->getSExtValue() + 7096 cast<ConstantSDNode>(SOffset)->getSExtValue() + 7097 cast<ConstantSDNode>(Offset)->getSExtValue()); 7098 } 7099 7100 SDValue SITargetLowering::lowerRawBufferAtomicIntrin(SDValue Op, 7101 SelectionDAG &DAG, 7102 unsigned NewOpcode) const { 7103 SDLoc DL(Op); 7104 7105 SDValue VData = Op.getOperand(2); 7106 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 7107 SDValue Ops[] = { 7108 Op.getOperand(0), // Chain 7109 VData, // vdata 7110 Op.getOperand(3), // rsrc 7111 DAG.getConstant(0, DL, MVT::i32), // vindex 7112 Offsets.first, // voffset 7113 Op.getOperand(5), // soffset 7114 Offsets.second, // offset 7115 Op.getOperand(6), // cachepolicy 7116 DAG.getTargetConstant(0, DL, MVT::i1), // idxen 7117 }; 7118 7119 auto *M = cast<MemSDNode>(Op); 7120 updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]); 7121 7122 EVT MemVT = VData.getValueType(); 7123 return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT, 7124 M->getMemOperand()); 7125 } 7126 7127 // Return a value to use for the idxen operand by examining the vindex operand. 7128 static unsigned getIdxEn(SDValue VIndex) { 7129 if (auto VIndexC = dyn_cast<ConstantSDNode>(VIndex)) 7130 // No need to set idxen if vindex is known to be zero. 
7131 return VIndexC->getZExtValue() != 0; 7132 return 1; 7133 } 7134 7135 SDValue 7136 SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG, 7137 unsigned NewOpcode) const { 7138 SDLoc DL(Op); 7139 7140 SDValue VData = Op.getOperand(2); 7141 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 7142 SDValue Ops[] = { 7143 Op.getOperand(0), // Chain 7144 VData, // vdata 7145 Op.getOperand(3), // rsrc 7146 Op.getOperand(4), // vindex 7147 Offsets.first, // voffset 7148 Op.getOperand(6), // soffset 7149 Offsets.second, // offset 7150 Op.getOperand(7), // cachepolicy 7151 DAG.getTargetConstant(1, DL, MVT::i1), // idxen 7152 }; 7153 7154 auto *M = cast<MemSDNode>(Op); 7155 updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); 7156 7157 EVT MemVT = VData.getValueType(); 7158 return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT, 7159 M->getMemOperand()); 7160 } 7161 7162 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 7163 SelectionDAG &DAG) const { 7164 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7165 SDLoc DL(Op); 7166 7167 switch (IntrID) { 7168 case Intrinsic::amdgcn_ds_ordered_add: 7169 case Intrinsic::amdgcn_ds_ordered_swap: { 7170 MemSDNode *M = cast<MemSDNode>(Op); 7171 SDValue Chain = M->getOperand(0); 7172 SDValue M0 = M->getOperand(2); 7173 SDValue Value = M->getOperand(3); 7174 unsigned IndexOperand = M->getConstantOperandVal(7); 7175 unsigned WaveRelease = M->getConstantOperandVal(8); 7176 unsigned WaveDone = M->getConstantOperandVal(9); 7177 7178 unsigned OrderedCountIndex = IndexOperand & 0x3f; 7179 IndexOperand &= ~0x3f; 7180 unsigned CountDw = 0; 7181 7182 if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) { 7183 CountDw = (IndexOperand >> 24) & 0xf; 7184 IndexOperand &= ~(0xf << 24); 7185 7186 if (CountDw < 1 || CountDw > 4) { 7187 report_fatal_error( 7188 "ds_ordered_count: dword count must be between 1 and 4"); 7189 } 7190 } 7191 7192 if (IndexOperand) 7193 report_fatal_error("ds_ordered_count: bad index operand"); 7194 7195 if (WaveDone && !WaveRelease) 7196 report_fatal_error("ds_ordered_count: wave_done requires wave_release"); 7197 7198 unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 
0 : 1; 7199 unsigned ShaderType = 7200 SIInstrInfo::getDSShaderTypeValue(DAG.getMachineFunction()); 7201 unsigned Offset0 = OrderedCountIndex << 2; 7202 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4); 7203 7204 if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) 7205 Offset1 |= (CountDw - 1) << 6; 7206 7207 if (Subtarget->getGeneration() < AMDGPUSubtarget::GFX11) 7208 Offset1 |= ShaderType << 2; 7209 7210 unsigned Offset = Offset0 | (Offset1 << 8); 7211 7212 SDValue Ops[] = { 7213 Chain, 7214 Value, 7215 DAG.getTargetConstant(Offset, DL, MVT::i16), 7216 copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue 7217 }; 7218 return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL, 7219 M->getVTList(), Ops, M->getMemoryVT(), 7220 M->getMemOperand()); 7221 } 7222 case Intrinsic::amdgcn_ds_fadd: { 7223 MemSDNode *M = cast<MemSDNode>(Op); 7224 unsigned Opc; 7225 switch (IntrID) { 7226 case Intrinsic::amdgcn_ds_fadd: 7227 Opc = ISD::ATOMIC_LOAD_FADD; 7228 break; 7229 } 7230 7231 return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(), 7232 M->getOperand(0), M->getOperand(2), M->getOperand(3), 7233 M->getMemOperand()); 7234 } 7235 case Intrinsic::amdgcn_atomic_inc: 7236 case Intrinsic::amdgcn_atomic_dec: 7237 case Intrinsic::amdgcn_ds_fmin: 7238 case Intrinsic::amdgcn_ds_fmax: { 7239 MemSDNode *M = cast<MemSDNode>(Op); 7240 unsigned Opc; 7241 switch (IntrID) { 7242 case Intrinsic::amdgcn_atomic_inc: 7243 Opc = AMDGPUISD::ATOMIC_INC; 7244 break; 7245 case Intrinsic::amdgcn_atomic_dec: 7246 Opc = AMDGPUISD::ATOMIC_DEC; 7247 break; 7248 case Intrinsic::amdgcn_ds_fmin: 7249 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN; 7250 break; 7251 case Intrinsic::amdgcn_ds_fmax: 7252 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX; 7253 break; 7254 default: 7255 llvm_unreachable("Unknown intrinsic!"); 7256 } 7257 SDValue Ops[] = { 7258 M->getOperand(0), // Chain 7259 M->getOperand(2), // Ptr 7260 M->getOperand(3) // Value 7261 }; 7262 7263 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, 7264 M->getMemoryVT(), M->getMemOperand()); 7265 } 7266 case Intrinsic::amdgcn_buffer_load: 7267 case Intrinsic::amdgcn_buffer_load_format: { 7268 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 7269 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); 7270 unsigned IdxEn = getIdxEn(Op.getOperand(3)); 7271 SDValue Ops[] = { 7272 Op.getOperand(0), // Chain 7273 Op.getOperand(2), // rsrc 7274 Op.getOperand(3), // vindex 7275 SDValue(), // voffset -- will be set by setBufferOffsets 7276 SDValue(), // soffset -- will be set by setBufferOffsets 7277 SDValue(), // offset -- will be set by setBufferOffsets 7278 DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 7279 DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen 7280 }; 7281 setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]); 7282 7283 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? 
7284 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 7285 7286 EVT VT = Op.getValueType(); 7287 EVT IntVT = VT.changeTypeToInteger(); 7288 auto *M = cast<MemSDNode>(Op); 7289 updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]); 7290 EVT LoadVT = Op.getValueType(); 7291 7292 if (LoadVT.getScalarType() == MVT::f16) 7293 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, 7294 M, DAG, Ops); 7295 7296 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics 7297 if (LoadVT.getScalarType() == MVT::i8 || 7298 LoadVT.getScalarType() == MVT::i16) 7299 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); 7300 7301 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, 7302 M->getMemOperand(), DAG); 7303 } 7304 case Intrinsic::amdgcn_raw_buffer_load: 7305 case Intrinsic::amdgcn_raw_buffer_load_format: { 7306 const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format; 7307 7308 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); 7309 SDValue Ops[] = { 7310 Op.getOperand(0), // Chain 7311 Op.getOperand(2), // rsrc 7312 DAG.getConstant(0, DL, MVT::i32), // vindex 7313 Offsets.first, // voffset 7314 Op.getOperand(4), // soffset 7315 Offsets.second, // offset 7316 Op.getOperand(5), // cachepolicy, swizzled buffer 7317 DAG.getTargetConstant(0, DL, MVT::i1), // idxen 7318 }; 7319 7320 auto *M = cast<MemSDNode>(Op); 7321 updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5]); 7322 return lowerIntrinsicLoad(M, IsFormat, DAG, Ops); 7323 } 7324 case Intrinsic::amdgcn_struct_buffer_load: 7325 case Intrinsic::amdgcn_struct_buffer_load_format: { 7326 const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format; 7327 7328 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 7329 SDValue Ops[] = { 7330 Op.getOperand(0), // Chain 7331 Op.getOperand(2), // rsrc 7332 Op.getOperand(3), // vindex 7333 Offsets.first, // voffset 7334 Op.getOperand(5), // soffset 7335 Offsets.second, // offset 7336 Op.getOperand(6), // cachepolicy, swizzled buffer 7337 DAG.getTargetConstant(1, DL, MVT::i1), // idxen 7338 }; 7339 7340 auto *M = cast<MemSDNode>(Op); 7341 updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]); 7342 return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops); 7343 } 7344 case Intrinsic::amdgcn_tbuffer_load: { 7345 MemSDNode *M = cast<MemSDNode>(Op); 7346 EVT LoadVT = Op.getValueType(); 7347 7348 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); 7349 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); 7350 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); 7351 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); 7352 unsigned IdxEn = getIdxEn(Op.getOperand(3)); 7353 SDValue Ops[] = { 7354 Op.getOperand(0), // Chain 7355 Op.getOperand(2), // rsrc 7356 Op.getOperand(3), // vindex 7357 Op.getOperand(4), // voffset 7358 Op.getOperand(5), // soffset 7359 Op.getOperand(6), // offset 7360 DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format 7361 DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 7362 DAG.getTargetConstant(IdxEn, DL, MVT::i1) // idxen 7363 }; 7364 7365 if (LoadVT.getScalarType() == MVT::f16) 7366 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, 7367 M, DAG, Ops); 7368 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 7369 Op->getVTList(), Ops, LoadVT, M->getMemOperand(), 7370 DAG); 7371 } 7372 case Intrinsic::amdgcn_raw_tbuffer_load: { 
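// Raw tbuffer loads carry no vindex operand, so a constant zero vindex and
// idxen = 0 are supplied when building the target node.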
7373 MemSDNode *M = cast<MemSDNode>(Op); 7374 EVT LoadVT = Op.getValueType(); 7375 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); 7376 7377 SDValue Ops[] = { 7378 Op.getOperand(0), // Chain 7379 Op.getOperand(2), // rsrc 7380 DAG.getConstant(0, DL, MVT::i32), // vindex 7381 Offsets.first, // voffset 7382 Op.getOperand(4), // soffset 7383 Offsets.second, // offset 7384 Op.getOperand(5), // format 7385 Op.getOperand(6), // cachepolicy, swizzled buffer 7386 DAG.getTargetConstant(0, DL, MVT::i1), // idxen 7387 }; 7388 7389 if (LoadVT.getScalarType() == MVT::f16) 7390 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, 7391 M, DAG, Ops); 7392 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 7393 Op->getVTList(), Ops, LoadVT, M->getMemOperand(), 7394 DAG); 7395 } 7396 case Intrinsic::amdgcn_struct_tbuffer_load: { 7397 MemSDNode *M = cast<MemSDNode>(Op); 7398 EVT LoadVT = Op.getValueType(); 7399 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 7400 7401 SDValue Ops[] = { 7402 Op.getOperand(0), // Chain 7403 Op.getOperand(2), // rsrc 7404 Op.getOperand(3), // vindex 7405 Offsets.first, // voffset 7406 Op.getOperand(5), // soffset 7407 Offsets.second, // offset 7408 Op.getOperand(6), // format 7409 Op.getOperand(7), // cachepolicy, swizzled buffer 7410 DAG.getTargetConstant(1, DL, MVT::i1), // idxen 7411 }; 7412 7413 if (LoadVT.getScalarType() == MVT::f16) 7414 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, 7415 M, DAG, Ops); 7416 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 7417 Op->getVTList(), Ops, LoadVT, M->getMemOperand(), 7418 DAG); 7419 } 7420 case Intrinsic::amdgcn_buffer_atomic_swap: 7421 case Intrinsic::amdgcn_buffer_atomic_add: 7422 case Intrinsic::amdgcn_buffer_atomic_sub: 7423 case Intrinsic::amdgcn_buffer_atomic_csub: 7424 case Intrinsic::amdgcn_buffer_atomic_smin: 7425 case Intrinsic::amdgcn_buffer_atomic_umin: 7426 case Intrinsic::amdgcn_buffer_atomic_smax: 7427 case Intrinsic::amdgcn_buffer_atomic_umax: 7428 case Intrinsic::amdgcn_buffer_atomic_and: 7429 case Intrinsic::amdgcn_buffer_atomic_or: 7430 case Intrinsic::amdgcn_buffer_atomic_xor: 7431 case Intrinsic::amdgcn_buffer_atomic_fadd: { 7432 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); 7433 unsigned IdxEn = getIdxEn(Op.getOperand(4)); 7434 SDValue Ops[] = { 7435 Op.getOperand(0), // Chain 7436 Op.getOperand(2), // vdata 7437 Op.getOperand(3), // rsrc 7438 Op.getOperand(4), // vindex 7439 SDValue(), // voffset -- will be set by setBufferOffsets 7440 SDValue(), // soffset -- will be set by setBufferOffsets 7441 SDValue(), // offset -- will be set by setBufferOffsets 7442 DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy 7443 DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen 7444 }; 7445 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); 7446 7447 EVT VT = Op.getValueType(); 7448 7449 auto *M = cast<MemSDNode>(Op); 7450 updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); 7451 unsigned Opcode = 0; 7452 7453 switch (IntrID) { 7454 case Intrinsic::amdgcn_buffer_atomic_swap: 7455 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; 7456 break; 7457 case Intrinsic::amdgcn_buffer_atomic_add: 7458 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; 7459 break; 7460 case Intrinsic::amdgcn_buffer_atomic_sub: 7461 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; 7462 break; 7463 case Intrinsic::amdgcn_buffer_atomic_csub: 7464 Opcode = AMDGPUISD::BUFFER_ATOMIC_CSUB; 7465 break; 7466 case Intrinsic::amdgcn_buffer_atomic_smin: 7467 Opcode = 
AMDGPUISD::BUFFER_ATOMIC_SMIN; 7468 break; 7469 case Intrinsic::amdgcn_buffer_atomic_umin: 7470 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; 7471 break; 7472 case Intrinsic::amdgcn_buffer_atomic_smax: 7473 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; 7474 break; 7475 case Intrinsic::amdgcn_buffer_atomic_umax: 7476 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; 7477 break; 7478 case Intrinsic::amdgcn_buffer_atomic_and: 7479 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; 7480 break; 7481 case Intrinsic::amdgcn_buffer_atomic_or: 7482 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; 7483 break; 7484 case Intrinsic::amdgcn_buffer_atomic_xor: 7485 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; 7486 break; 7487 case Intrinsic::amdgcn_buffer_atomic_fadd: 7488 if (!Op.getValue(0).use_empty() && !hasAtomicFaddRtnForTy(Op)) { 7489 DiagnosticInfoUnsupported 7490 NoFpRet(DAG.getMachineFunction().getFunction(), 7491 "return versions of fp atomics not supported", 7492 DL.getDebugLoc(), DS_Error); 7493 DAG.getContext()->diagnose(NoFpRet); 7494 return SDValue(); 7495 } 7496 Opcode = AMDGPUISD::BUFFER_ATOMIC_FADD; 7497 break; 7498 default: 7499 llvm_unreachable("unhandled atomic opcode"); 7500 } 7501 7502 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 7503 M->getMemOperand()); 7504 } 7505 case Intrinsic::amdgcn_raw_buffer_atomic_fadd: 7506 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD); 7507 case Intrinsic::amdgcn_struct_buffer_atomic_fadd: 7508 return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD); 7509 case Intrinsic::amdgcn_raw_buffer_atomic_fmin: 7510 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN); 7511 case Intrinsic::amdgcn_struct_buffer_atomic_fmin: 7512 return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN); 7513 case Intrinsic::amdgcn_raw_buffer_atomic_fmax: 7514 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX); 7515 case Intrinsic::amdgcn_struct_buffer_atomic_fmax: 7516 return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX); 7517 case Intrinsic::amdgcn_raw_buffer_atomic_swap: 7518 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SWAP); 7519 case Intrinsic::amdgcn_raw_buffer_atomic_add: 7520 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD); 7521 case Intrinsic::amdgcn_raw_buffer_atomic_sub: 7522 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB); 7523 case Intrinsic::amdgcn_raw_buffer_atomic_smin: 7524 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMIN); 7525 case Intrinsic::amdgcn_raw_buffer_atomic_umin: 7526 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMIN); 7527 case Intrinsic::amdgcn_raw_buffer_atomic_smax: 7528 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMAX); 7529 case Intrinsic::amdgcn_raw_buffer_atomic_umax: 7530 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMAX); 7531 case Intrinsic::amdgcn_raw_buffer_atomic_and: 7532 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND); 7533 case Intrinsic::amdgcn_raw_buffer_atomic_or: 7534 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR); 7535 case Intrinsic::amdgcn_raw_buffer_atomic_xor: 7536 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR); 7537 case Intrinsic::amdgcn_raw_buffer_atomic_inc: 7538 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC); 7539 case 
Intrinsic::amdgcn_raw_buffer_atomic_dec: 7540 return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC); 7541 case Intrinsic::amdgcn_struct_buffer_atomic_swap: 7542 return lowerStructBufferAtomicIntrin(Op, DAG, 7543 AMDGPUISD::BUFFER_ATOMIC_SWAP); 7544 case Intrinsic::amdgcn_struct_buffer_atomic_add: 7545 return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD); 7546 case Intrinsic::amdgcn_struct_buffer_atomic_sub: 7547 return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB); 7548 case Intrinsic::amdgcn_struct_buffer_atomic_smin: 7549 return lowerStructBufferAtomicIntrin(Op, DAG, 7550 AMDGPUISD::BUFFER_ATOMIC_SMIN); 7551 case Intrinsic::amdgcn_struct_buffer_atomic_umin: 7552 return lowerStructBufferAtomicIntrin(Op, DAG, 7553 AMDGPUISD::BUFFER_ATOMIC_UMIN); 7554 case Intrinsic::amdgcn_struct_buffer_atomic_smax: 7555 return lowerStructBufferAtomicIntrin(Op, DAG, 7556 AMDGPUISD::BUFFER_ATOMIC_SMAX); 7557 case Intrinsic::amdgcn_struct_buffer_atomic_umax: 7558 return lowerStructBufferAtomicIntrin(Op, DAG, 7559 AMDGPUISD::BUFFER_ATOMIC_UMAX); 7560 case Intrinsic::amdgcn_struct_buffer_atomic_and: 7561 return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND); 7562 case Intrinsic::amdgcn_struct_buffer_atomic_or: 7563 return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR); 7564 case Intrinsic::amdgcn_struct_buffer_atomic_xor: 7565 return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR); 7566 case Intrinsic::amdgcn_struct_buffer_atomic_inc: 7567 return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC); 7568 case Intrinsic::amdgcn_struct_buffer_atomic_dec: 7569 return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC); 7570 7571 case Intrinsic::amdgcn_buffer_atomic_cmpswap: { 7572 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); 7573 unsigned IdxEn = getIdxEn(Op.getOperand(5)); 7574 SDValue Ops[] = { 7575 Op.getOperand(0), // Chain 7576 Op.getOperand(2), // src 7577 Op.getOperand(3), // cmp 7578 Op.getOperand(4), // rsrc 7579 Op.getOperand(5), // vindex 7580 SDValue(), // voffset -- will be set by setBufferOffsets 7581 SDValue(), // soffset -- will be set by setBufferOffsets 7582 SDValue(), // offset -- will be set by setBufferOffsets 7583 DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy 7584 DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen 7585 }; 7586 setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]); 7587 7588 EVT VT = Op.getValueType(); 7589 auto *M = cast<MemSDNode>(Op); 7590 updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]); 7591 7592 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 7593 Op->getVTList(), Ops, VT, M->getMemOperand()); 7594 } 7595 case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: { 7596 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 7597 SDValue Ops[] = { 7598 Op.getOperand(0), // Chain 7599 Op.getOperand(2), // src 7600 Op.getOperand(3), // cmp 7601 Op.getOperand(4), // rsrc 7602 DAG.getConstant(0, DL, MVT::i32), // vindex 7603 Offsets.first, // voffset 7604 Op.getOperand(6), // soffset 7605 Offsets.second, // offset 7606 Op.getOperand(7), // cachepolicy 7607 DAG.getTargetConstant(0, DL, MVT::i1), // idxen 7608 }; 7609 EVT VT = Op.getValueType(); 7610 auto *M = cast<MemSDNode>(Op); 7611 updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7]); 7612 7613 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 7614 Op->getVTList(), 
Ops, VT, M->getMemOperand()); 7615 } 7616 case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: { 7617 auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG); 7618 SDValue Ops[] = { 7619 Op.getOperand(0), // Chain 7620 Op.getOperand(2), // src 7621 Op.getOperand(3), // cmp 7622 Op.getOperand(4), // rsrc 7623 Op.getOperand(5), // vindex 7624 Offsets.first, // voffset 7625 Op.getOperand(7), // soffset 7626 Offsets.second, // offset 7627 Op.getOperand(8), // cachepolicy 7628 DAG.getTargetConstant(1, DL, MVT::i1), // idxen 7629 }; 7630 EVT VT = Op.getValueType(); 7631 auto *M = cast<MemSDNode>(Op); 7632 updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]); 7633 7634 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 7635 Op->getVTList(), Ops, VT, M->getMemOperand()); 7636 } 7637 case Intrinsic::amdgcn_image_bvh_intersect_ray: { 7638 MemSDNode *M = cast<MemSDNode>(Op); 7639 SDValue NodePtr = M->getOperand(2); 7640 SDValue RayExtent = M->getOperand(3); 7641 SDValue RayOrigin = M->getOperand(4); 7642 SDValue RayDir = M->getOperand(5); 7643 SDValue RayInvDir = M->getOperand(6); 7644 SDValue TDescr = M->getOperand(7); 7645 7646 assert(NodePtr.getValueType() == MVT::i32 || 7647 NodePtr.getValueType() == MVT::i64); 7648 assert(RayDir.getValueType() == MVT::v3f16 || 7649 RayDir.getValueType() == MVT::v3f32); 7650 7651 if (!Subtarget->hasGFX10_AEncoding()) { 7652 emitRemovedIntrinsicError(DAG, DL, Op.getValueType()); 7653 return SDValue(); 7654 } 7655 7656 const bool IsGFX11Plus = AMDGPU::isGFX11Plus(*Subtarget); 7657 const bool IsA16 = RayDir.getValueType().getVectorElementType() == MVT::f16; 7658 const bool Is64 = NodePtr.getValueType() == MVT::i64; 7659 const unsigned NumVDataDwords = 4; 7660 const unsigned NumVAddrDwords = IsA16 ? (Is64 ? 9 : 8) : (Is64 ? 12 : 11); 7661 const unsigned NumVAddrs = IsGFX11Plus ? (IsA16 ? 4 : 5) : NumVAddrDwords; 7662 const bool UseNSA = 7663 Subtarget->hasNSAEncoding() && NumVAddrs <= Subtarget->getNSAMaxSize(); 7664 const unsigned BaseOpcodes[2][2] = { 7665 {AMDGPU::IMAGE_BVH_INTERSECT_RAY, AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16}, 7666 {AMDGPU::IMAGE_BVH64_INTERSECT_RAY, 7667 AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16}}; 7668 int Opcode; 7669 if (UseNSA) { 7670 Opcode = AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16], 7671 IsGFX11Plus ? AMDGPU::MIMGEncGfx11NSA 7672 : AMDGPU::MIMGEncGfx10NSA, 7673 NumVDataDwords, NumVAddrDwords); 7674 } else { 7675 Opcode = 7676 AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16], 7677 IsGFX11Plus ? 
AMDGPU::MIMGEncGfx11Default 7678 : AMDGPU::MIMGEncGfx10Default, 7679 NumVDataDwords, PowerOf2Ceil(NumVAddrDwords)); 7680 } 7681 assert(Opcode != -1); 7682 7683 SmallVector<SDValue, 16> Ops; 7684 7685 auto packLanes = [&DAG, &Ops, &DL] (SDValue Op, bool IsAligned) { 7686 SmallVector<SDValue, 3> Lanes; 7687 DAG.ExtractVectorElements(Op, Lanes, 0, 3); 7688 if (Lanes[0].getValueSizeInBits() == 32) { 7689 for (unsigned I = 0; I < 3; ++I) 7690 Ops.push_back(DAG.getBitcast(MVT::i32, Lanes[I])); 7691 } else { 7692 if (IsAligned) { 7693 Ops.push_back( 7694 DAG.getBitcast(MVT::i32, 7695 DAG.getBuildVector(MVT::v2f16, DL, 7696 { Lanes[0], Lanes[1] }))); 7697 Ops.push_back(Lanes[2]); 7698 } else { 7699 SDValue Elt0 = Ops.pop_back_val(); 7700 Ops.push_back( 7701 DAG.getBitcast(MVT::i32, 7702 DAG.getBuildVector(MVT::v2f16, DL, 7703 { Elt0, Lanes[0] }))); 7704 Ops.push_back( 7705 DAG.getBitcast(MVT::i32, 7706 DAG.getBuildVector(MVT::v2f16, DL, 7707 { Lanes[1], Lanes[2] }))); 7708 } 7709 } 7710 }; 7711 7712 if (UseNSA && IsGFX11Plus) { 7713 Ops.push_back(NodePtr); 7714 Ops.push_back(DAG.getBitcast(MVT::i32, RayExtent)); 7715 Ops.push_back(RayOrigin); 7716 if (IsA16) { 7717 SmallVector<SDValue, 3> DirLanes, InvDirLanes, MergedLanes; 7718 DAG.ExtractVectorElements(RayDir, DirLanes, 0, 3); 7719 DAG.ExtractVectorElements(RayInvDir, InvDirLanes, 0, 3); 7720 for (unsigned I = 0; I < 3; ++I) { 7721 MergedLanes.push_back(DAG.getBitcast( 7722 MVT::i32, DAG.getBuildVector(MVT::v2f16, DL, 7723 {DirLanes[I], InvDirLanes[I]}))); 7724 } 7725 Ops.push_back(DAG.getBuildVector(MVT::v3i32, DL, MergedLanes)); 7726 } else { 7727 Ops.push_back(RayDir); 7728 Ops.push_back(RayInvDir); 7729 } 7730 } else { 7731 if (Is64) 7732 DAG.ExtractVectorElements(DAG.getBitcast(MVT::v2i32, NodePtr), Ops, 0, 7733 2); 7734 else 7735 Ops.push_back(NodePtr); 7736 7737 Ops.push_back(DAG.getBitcast(MVT::i32, RayExtent)); 7738 packLanes(RayOrigin, true); 7739 packLanes(RayDir, true); 7740 packLanes(RayInvDir, false); 7741 } 7742 7743 if (!UseNSA) { 7744 // Build a single vector containing all the operands so far prepared. 7745 if (NumVAddrDwords > 8) { 7746 SDValue Undef = DAG.getUNDEF(MVT::i32); 7747 Ops.append(16 - Ops.size(), Undef); 7748 } 7749 assert(Ops.size() == 8 || Ops.size() == 16); 7750 SDValue MergedOps = DAG.getBuildVector( 7751 Ops.size() == 16 ? 
MVT::v16i32 : MVT::v8i32, DL, Ops); 7752 Ops.clear(); 7753 Ops.push_back(MergedOps); 7754 } 7755 7756 Ops.push_back(TDescr); 7757 if (IsA16) 7758 Ops.push_back(DAG.getTargetConstant(1, DL, MVT::i1)); 7759 Ops.push_back(M->getChain()); 7760 7761 auto *NewNode = DAG.getMachineNode(Opcode, DL, M->getVTList(), Ops); 7762 MachineMemOperand *MemRef = M->getMemOperand(); 7763 DAG.setNodeMemRefs(NewNode, {MemRef}); 7764 return SDValue(NewNode, 0); 7765 } 7766 case Intrinsic::amdgcn_global_atomic_fadd: 7767 if (!Op.getValue(0).use_empty() && !Subtarget->hasGFX90AInsts()) { 7768 DiagnosticInfoUnsupported 7769 NoFpRet(DAG.getMachineFunction().getFunction(), 7770 "return versions of fp atomics not supported", 7771 DL.getDebugLoc(), DS_Error); 7772 DAG.getContext()->diagnose(NoFpRet); 7773 return SDValue(); 7774 } 7775 LLVM_FALLTHROUGH; 7776 case Intrinsic::amdgcn_global_atomic_fmin: 7777 case Intrinsic::amdgcn_global_atomic_fmax: 7778 case Intrinsic::amdgcn_flat_atomic_fadd: 7779 case Intrinsic::amdgcn_flat_atomic_fmin: 7780 case Intrinsic::amdgcn_flat_atomic_fmax: { 7781 MemSDNode *M = cast<MemSDNode>(Op); 7782 SDValue Ops[] = { 7783 M->getOperand(0), // Chain 7784 M->getOperand(2), // Ptr 7785 M->getOperand(3) // Value 7786 }; 7787 unsigned Opcode = 0; 7788 switch (IntrID) { 7789 case Intrinsic::amdgcn_global_atomic_fadd: 7790 case Intrinsic::amdgcn_flat_atomic_fadd: { 7791 EVT VT = Op.getOperand(3).getValueType(); 7792 return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT, 7793 DAG.getVTList(VT, MVT::Other), Ops, 7794 M->getMemOperand()); 7795 } 7796 case Intrinsic::amdgcn_global_atomic_fmin: 7797 case Intrinsic::amdgcn_flat_atomic_fmin: { 7798 Opcode = AMDGPUISD::ATOMIC_LOAD_FMIN; 7799 break; 7800 } 7801 case Intrinsic::amdgcn_global_atomic_fmax: 7802 case Intrinsic::amdgcn_flat_atomic_fmax: { 7803 Opcode = AMDGPUISD::ATOMIC_LOAD_FMAX; 7804 break; 7805 } 7806 default: 7807 llvm_unreachable("unhandled atomic opcode"); 7808 } 7809 return DAG.getMemIntrinsicNode(Opcode, SDLoc(Op), 7810 M->getVTList(), Ops, M->getMemoryVT(), 7811 M->getMemOperand()); 7812 } 7813 default: 7814 7815 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 7816 AMDGPU::getImageDimIntrinsicInfo(IntrID)) 7817 return lowerImage(Op, ImageDimIntr, DAG, true); 7818 7819 return SDValue(); 7820 } 7821 } 7822 7823 // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to 7824 // dwordx4 if on SI. 
7825 SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, 7826 SDVTList VTList, 7827 ArrayRef<SDValue> Ops, EVT MemVT, 7828 MachineMemOperand *MMO, 7829 SelectionDAG &DAG) const { 7830 EVT VT = VTList.VTs[0]; 7831 EVT WidenedVT = VT; 7832 EVT WidenedMemVT = MemVT; 7833 if (!Subtarget->hasDwordx3LoadStores() && 7834 (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) { 7835 WidenedVT = EVT::getVectorVT(*DAG.getContext(), 7836 WidenedVT.getVectorElementType(), 4); 7837 WidenedMemVT = EVT::getVectorVT(*DAG.getContext(), 7838 WidenedMemVT.getVectorElementType(), 4); 7839 MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16); 7840 } 7841 7842 assert(VTList.NumVTs == 2); 7843 SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]); 7844 7845 auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops, 7846 WidenedMemVT, MMO); 7847 if (WidenedVT != VT) { 7848 auto Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp, 7849 DAG.getVectorIdxConstant(0, DL)); 7850 NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL); 7851 } 7852 return NewOp; 7853 } 7854 7855 SDValue SITargetLowering::handleD16VData(SDValue VData, SelectionDAG &DAG, 7856 bool ImageStore) const { 7857 EVT StoreVT = VData.getValueType(); 7858 7859 // No change for f16 and legal vector D16 types. 7860 if (!StoreVT.isVector()) 7861 return VData; 7862 7863 SDLoc DL(VData); 7864 unsigned NumElements = StoreVT.getVectorNumElements(); 7865 7866 if (Subtarget->hasUnpackedD16VMem()) { 7867 // We need to unpack the packed data to store. 7868 EVT IntStoreVT = StoreVT.changeTypeToInteger(); 7869 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); 7870 7871 EVT EquivStoreVT = 7872 EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElements); 7873 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData); 7874 return DAG.UnrollVectorOp(ZExt.getNode()); 7875 } 7876 7877 // The sq block of gfx8.1 does not estimate register use correctly for d16 7878 // image store instructions. The data operand is computed as if it were not a 7879 // d16 image instruction. 
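  // As a workaround, the block below bitcasts the data to i16 scalars, repacks
  // adjacent pairs into i32 values (padding an odd trailing element with
  // undef), and then pads the packed result with undef i32s back to the
  // original element count before rebuilding the data vector for the store.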
7880 if (ImageStore && Subtarget->hasImageStoreD16Bug()) { 7881 // Bitcast to i16 7882 EVT IntStoreVT = StoreVT.changeTypeToInteger(); 7883 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); 7884 7885 // Decompose into scalars 7886 SmallVector<SDValue, 4> Elts; 7887 DAG.ExtractVectorElements(IntVData, Elts); 7888 7889 // Group pairs of i16 into v2i16 and bitcast to i32 7890 SmallVector<SDValue, 4> PackedElts; 7891 for (unsigned I = 0; I < Elts.size() / 2; I += 1) { 7892 SDValue Pair = 7893 DAG.getBuildVector(MVT::v2i16, DL, {Elts[I * 2], Elts[I * 2 + 1]}); 7894 SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair); 7895 PackedElts.push_back(IntPair); 7896 } 7897 if ((NumElements % 2) == 1) { 7898 // Handle v3i16 7899 unsigned I = Elts.size() / 2; 7900 SDValue Pair = DAG.getBuildVector(MVT::v2i16, DL, 7901 {Elts[I * 2], DAG.getUNDEF(MVT::i16)}); 7902 SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair); 7903 PackedElts.push_back(IntPair); 7904 } 7905 7906 // Pad using UNDEF 7907 PackedElts.resize(Elts.size(), DAG.getUNDEF(MVT::i32)); 7908 7909 // Build final vector 7910 EVT VecVT = 7911 EVT::getVectorVT(*DAG.getContext(), MVT::i32, PackedElts.size()); 7912 return DAG.getBuildVector(VecVT, DL, PackedElts); 7913 } 7914 7915 if (NumElements == 3) { 7916 EVT IntStoreVT = 7917 EVT::getIntegerVT(*DAG.getContext(), StoreVT.getStoreSizeInBits()); 7918 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); 7919 7920 EVT WidenedStoreVT = EVT::getVectorVT( 7921 *DAG.getContext(), StoreVT.getVectorElementType(), NumElements + 1); 7922 EVT WidenedIntVT = EVT::getIntegerVT(*DAG.getContext(), 7923 WidenedStoreVT.getStoreSizeInBits()); 7924 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenedIntVT, IntVData); 7925 return DAG.getNode(ISD::BITCAST, DL, WidenedStoreVT, ZExt); 7926 } 7927 7928 assert(isTypeLegal(StoreVT)); 7929 return VData; 7930 } 7931 7932 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, 7933 SelectionDAG &DAG) const { 7934 SDLoc DL(Op); 7935 SDValue Chain = Op.getOperand(0); 7936 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7937 MachineFunction &MF = DAG.getMachineFunction(); 7938 7939 switch (IntrinsicID) { 7940 case Intrinsic::amdgcn_exp_compr: { 7941 if (!Subtarget->hasCompressedExport()) { 7942 DiagnosticInfoUnsupported BadIntrin( 7943 DAG.getMachineFunction().getFunction(), 7944 "intrinsic not supported on subtarget", DL.getDebugLoc()); 7945 DAG.getContext()->diagnose(BadIntrin); 7946 } 7947 SDValue Src0 = Op.getOperand(4); 7948 SDValue Src1 = Op.getOperand(5); 7949 // Hack around illegal type on SI by directly selecting it. 7950 if (isTypeLegal(Src0.getValueType())) 7951 return SDValue(); 7952 7953 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); 7954 SDValue Undef = DAG.getUNDEF(MVT::f32); 7955 const SDValue Ops[] = { 7956 Op.getOperand(2), // tgt 7957 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), // src0 7958 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), // src1 7959 Undef, // src2 7960 Undef, // src3 7961 Op.getOperand(7), // vm 7962 DAG.getTargetConstant(1, DL, MVT::i1), // compr 7963 Op.getOperand(3), // en 7964 Op.getOperand(0) // Chain 7965 }; 7966 7967 unsigned Opc = Done->isZero() ? 
AMDGPU::EXP : AMDGPU::EXP_DONE; 7968 return SDValue(DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops), 0); 7969 } 7970 case Intrinsic::amdgcn_s_barrier: { 7971 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) { 7972 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 7973 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second; 7974 if (WGSize <= ST.getWavefrontSize()) 7975 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other, 7976 Op.getOperand(0)), 0); 7977 } 7978 return SDValue(); 7979 }; 7980 case Intrinsic::amdgcn_tbuffer_store: { 7981 SDValue VData = Op.getOperand(2); 7982 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 7983 if (IsD16) 7984 VData = handleD16VData(VData, DAG); 7985 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); 7986 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); 7987 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); 7988 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue(); 7989 unsigned IdxEn = getIdxEn(Op.getOperand(4)); 7990 SDValue Ops[] = { 7991 Chain, 7992 VData, // vdata 7993 Op.getOperand(3), // rsrc 7994 Op.getOperand(4), // vindex 7995 Op.getOperand(5), // voffset 7996 Op.getOperand(6), // soffset 7997 Op.getOperand(7), // offset 7998 DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format 7999 DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 8000 DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen 8001 }; 8002 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 8003 AMDGPUISD::TBUFFER_STORE_FORMAT; 8004 MemSDNode *M = cast<MemSDNode>(Op); 8005 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 8006 M->getMemoryVT(), M->getMemOperand()); 8007 } 8008 8009 case Intrinsic::amdgcn_struct_tbuffer_store: { 8010 SDValue VData = Op.getOperand(2); 8011 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 8012 if (IsD16) 8013 VData = handleD16VData(VData, DAG); 8014 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 8015 SDValue Ops[] = { 8016 Chain, 8017 VData, // vdata 8018 Op.getOperand(3), // rsrc 8019 Op.getOperand(4), // vindex 8020 Offsets.first, // voffset 8021 Op.getOperand(6), // soffset 8022 Offsets.second, // offset 8023 Op.getOperand(7), // format 8024 Op.getOperand(8), // cachepolicy, swizzled buffer 8025 DAG.getTargetConstant(1, DL, MVT::i1), // idxen 8026 }; 8027 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 8028 AMDGPUISD::TBUFFER_STORE_FORMAT; 8029 MemSDNode *M = cast<MemSDNode>(Op); 8030 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 8031 M->getMemoryVT(), M->getMemOperand()); 8032 } 8033 8034 case Intrinsic::amdgcn_raw_tbuffer_store: { 8035 SDValue VData = Op.getOperand(2); 8036 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 8037 if (IsD16) 8038 VData = handleD16VData(VData, DAG); 8039 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 8040 SDValue Ops[] = { 8041 Chain, 8042 VData, // vdata 8043 Op.getOperand(3), // rsrc 8044 DAG.getConstant(0, DL, MVT::i32), // vindex 8045 Offsets.first, // voffset 8046 Op.getOperand(5), // soffset 8047 Offsets.second, // offset 8048 Op.getOperand(6), // format 8049 Op.getOperand(7), // cachepolicy, swizzled buffer 8050 DAG.getTargetConstant(0, DL, MVT::i1), // idxen 8051 }; 8052 unsigned Opc = IsD16 ? 
AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 8053 AMDGPUISD::TBUFFER_STORE_FORMAT; 8054 MemSDNode *M = cast<MemSDNode>(Op); 8055 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 8056 M->getMemoryVT(), M->getMemOperand()); 8057 } 8058 8059 case Intrinsic::amdgcn_buffer_store: 8060 case Intrinsic::amdgcn_buffer_store_format: { 8061 SDValue VData = Op.getOperand(2); 8062 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 8063 if (IsD16) 8064 VData = handleD16VData(VData, DAG); 8065 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); 8066 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); 8067 unsigned IdxEn = getIdxEn(Op.getOperand(4)); 8068 SDValue Ops[] = { 8069 Chain, 8070 VData, 8071 Op.getOperand(3), // rsrc 8072 Op.getOperand(4), // vindex 8073 SDValue(), // voffset -- will be set by setBufferOffsets 8074 SDValue(), // soffset -- will be set by setBufferOffsets 8075 SDValue(), // offset -- will be set by setBufferOffsets 8076 DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy 8077 DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen 8078 }; 8079 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); 8080 8081 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ? 8082 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; 8083 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 8084 MemSDNode *M = cast<MemSDNode>(Op); 8085 updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); 8086 8087 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics 8088 EVT VDataType = VData.getValueType().getScalarType(); 8089 if (VDataType == MVT::i8 || VDataType == MVT::i16) 8090 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); 8091 8092 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 8093 M->getMemoryVT(), M->getMemOperand()); 8094 } 8095 8096 case Intrinsic::amdgcn_raw_buffer_store: 8097 case Intrinsic::amdgcn_raw_buffer_store_format: { 8098 const bool IsFormat = 8099 IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format; 8100 8101 SDValue VData = Op.getOperand(2); 8102 EVT VDataVT = VData.getValueType(); 8103 EVT EltType = VDataVT.getScalarType(); 8104 bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); 8105 if (IsD16) { 8106 VData = handleD16VData(VData, DAG); 8107 VDataVT = VData.getValueType(); 8108 } 8109 8110 if (!isTypeLegal(VDataVT)) { 8111 VData = 8112 DAG.getNode(ISD::BITCAST, DL, 8113 getEquivalentMemType(*DAG.getContext(), VDataVT), VData); 8114 } 8115 8116 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); 8117 SDValue Ops[] = { 8118 Chain, 8119 VData, 8120 Op.getOperand(3), // rsrc 8121 DAG.getConstant(0, DL, MVT::i32), // vindex 8122 Offsets.first, // voffset 8123 Op.getOperand(5), // soffset 8124 Offsets.second, // offset 8125 Op.getOperand(6), // cachepolicy, swizzled buffer 8126 DAG.getTargetConstant(0, DL, MVT::i1), // idxen 8127 }; 8128 unsigned Opc = 8129 IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE; 8130 Opc = IsD16 ? 
AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 8131 MemSDNode *M = cast<MemSDNode>(Op); 8132 updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]); 8133 8134 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics 8135 if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32) 8136 return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M); 8137 8138 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 8139 M->getMemoryVT(), M->getMemOperand()); 8140 } 8141 8142 case Intrinsic::amdgcn_struct_buffer_store: 8143 case Intrinsic::amdgcn_struct_buffer_store_format: { 8144 const bool IsFormat = 8145 IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format; 8146 8147 SDValue VData = Op.getOperand(2); 8148 EVT VDataVT = VData.getValueType(); 8149 EVT EltType = VDataVT.getScalarType(); 8150 bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); 8151 8152 if (IsD16) { 8153 VData = handleD16VData(VData, DAG); 8154 VDataVT = VData.getValueType(); 8155 } 8156 8157 if (!isTypeLegal(VDataVT)) { 8158 VData = 8159 DAG.getNode(ISD::BITCAST, DL, 8160 getEquivalentMemType(*DAG.getContext(), VDataVT), VData); 8161 } 8162 8163 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); 8164 SDValue Ops[] = { 8165 Chain, 8166 VData, 8167 Op.getOperand(3), // rsrc 8168 Op.getOperand(4), // vindex 8169 Offsets.first, // voffset 8170 Op.getOperand(6), // soffset 8171 Offsets.second, // offset 8172 Op.getOperand(7), // cachepolicy, swizzled buffer 8173 DAG.getTargetConstant(1, DL, MVT::i1), // idxen 8174 }; 8175 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ? 8176 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; 8177 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 8178 MemSDNode *M = cast<MemSDNode>(Op); 8179 updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); 8180 8181 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics 8182 EVT VDataType = VData.getValueType().getScalarType(); 8183 if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32) 8184 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); 8185 8186 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 8187 M->getMemoryVT(), M->getMemOperand()); 8188 } 8189 case Intrinsic::amdgcn_raw_buffer_load_lds: 8190 case Intrinsic::amdgcn_struct_buffer_load_lds: { 8191 unsigned Opc; 8192 bool HasVIndex = IntrinsicID == Intrinsic::amdgcn_struct_buffer_load_lds; 8193 unsigned OpOffset = HasVIndex ? 1 : 0; 8194 SDValue VOffset = Op.getOperand(5 + OpOffset); 8195 auto CVOffset = dyn_cast<ConstantSDNode>(VOffset); 8196 bool HasVOffset = !CVOffset || !CVOffset->isZero(); 8197 unsigned Size = Op->getConstantOperandVal(4); 8198 8199 switch (Size) { 8200 default: 8201 return SDValue(); 8202 case 1: 8203 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN 8204 : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN 8205 : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN 8206 : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET; 8207 break; 8208 case 2: 8209 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN 8210 : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN 8211 : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN 8212 : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET; 8213 break; 8214 case 4: 8215 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN 8216 : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN 8217 : HasVOffset ? 
AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN 8218 : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET; 8219 break; 8220 } 8221 8222 SDValue M0Val = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 8223 8224 SmallVector<SDValue, 8> Ops; 8225 8226 if (HasVIndex && HasVOffset) 8227 Ops.push_back(DAG.getBuildVector(MVT::v2i32, DL, 8228 { Op.getOperand(5), // VIndex 8229 VOffset })); 8230 else if (HasVIndex) 8231 Ops.push_back(Op.getOperand(5)); 8232 else if (HasVOffset) 8233 Ops.push_back(VOffset); 8234 8235 Ops.push_back(Op.getOperand(2)); // rsrc 8236 Ops.push_back(Op.getOperand(6 + OpOffset)); // soffset 8237 Ops.push_back(Op.getOperand(7 + OpOffset)); // imm offset 8238 unsigned Aux = Op.getConstantOperandVal(8 + OpOffset); 8239 Ops.push_back( 8240 DAG.getTargetConstant(Aux & AMDGPU::CPol::ALL, DL, MVT::i8)); // cpol 8241 Ops.push_back( 8242 DAG.getTargetConstant((Aux >> 3) & 1, DL, MVT::i8)); // swz 8243 Ops.push_back(M0Val.getValue(0)); // Chain 8244 Ops.push_back(M0Val.getValue(1)); // Glue 8245 8246 auto *M = cast<MemSDNode>(Op); 8247 MachineMemOperand *LoadMMO = M->getMemOperand(); 8248 MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo(); 8249 LoadPtrI.Offset = Op->getConstantOperandVal(7 + OpOffset); 8250 MachinePointerInfo StorePtrI = LoadPtrI; 8251 StorePtrI.V = nullptr; 8252 StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS; 8253 8254 auto F = LoadMMO->getFlags() & 8255 ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad); 8256 LoadMMO = MF.getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad, 8257 Size, LoadMMO->getBaseAlign()); 8258 8259 MachineMemOperand *StoreMMO = 8260 MF.getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore, 8261 sizeof(int32_t), LoadMMO->getBaseAlign()); 8262 8263 auto Load = DAG.getMachineNode(Opc, DL, M->getVTList(), Ops); 8264 DAG.setNodeMemRefs(Load, {LoadMMO, StoreMMO}); 8265 8266 return SDValue(Load, 0); 8267 } 8268 case Intrinsic::amdgcn_global_load_lds: { 8269 unsigned Opc; 8270 unsigned Size = Op->getConstantOperandVal(4); 8271 switch (Size) { 8272 default: 8273 return SDValue(); 8274 case 1: 8275 Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE; 8276 break; 8277 case 2: 8278 Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT; 8279 break; 8280 case 4: 8281 Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD; 8282 break; 8283 } 8284 8285 auto *M = cast<MemSDNode>(Op); 8286 SDValue M0Val = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 8287 8288 SmallVector<SDValue, 6> Ops; 8289 8290 SDValue Addr = Op.getOperand(2); // Global ptr 8291 SDValue VOffset; 8292 // Try to split SAddr and VOffset. Global and LDS pointers share the same 8293 // immediate offset, so we cannot use a regular SelectGlobalSAddr(). 
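    // Only the pattern (add (i64 sgpr), (zero_extend (i32 vgpr))) is matched
    // here. When the remaining address is uniform, the SADDR form of the
    // instruction is used below, materializing a zero VGPR offset if none was
    // split out of the address.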
8294 if (Addr->isDivergent() && Addr.getOpcode() == ISD::ADD) { 8295 SDValue LHS = Addr.getOperand(0); 8296 SDValue RHS = Addr.getOperand(1); 8297 8298 if (LHS->isDivergent()) 8299 std::swap(LHS, RHS); 8300 8301 if (!LHS->isDivergent() && RHS.getOpcode() == ISD::ZERO_EXTEND && 8302 RHS.getOperand(0).getValueType() == MVT::i32) { 8303 // add (i64 sgpr), (zero_extend (i32 vgpr)) 8304 Addr = LHS; 8305 VOffset = RHS.getOperand(0); 8306 } 8307 } 8308 8309 Ops.push_back(Addr); 8310 if (!Addr->isDivergent()) { 8311 Opc = AMDGPU::getGlobalSaddrOp(Opc); 8312 if (!VOffset) 8313 VOffset = SDValue( 8314 DAG.getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32, 8315 DAG.getTargetConstant(0, DL, MVT::i32)), 0); 8316 Ops.push_back(VOffset); 8317 } 8318 8319 Ops.push_back(Op.getOperand(5)); // Offset 8320 Ops.push_back(Op.getOperand(6)); // CPol 8321 Ops.push_back(M0Val.getValue(0)); // Chain 8322 Ops.push_back(M0Val.getValue(1)); // Glue 8323 8324 MachineMemOperand *LoadMMO = M->getMemOperand(); 8325 MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo(); 8326 LoadPtrI.Offset = Op->getConstantOperandVal(5); 8327 MachinePointerInfo StorePtrI = LoadPtrI; 8328 LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS; 8329 StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS; 8330 auto F = LoadMMO->getFlags() & 8331 ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad); 8332 LoadMMO = MF.getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad, 8333 Size, LoadMMO->getBaseAlign()); 8334 MachineMemOperand *StoreMMO = 8335 MF.getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore, 8336 sizeof(int32_t), Align(4)); 8337 8338 auto Load = DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops); 8339 DAG.setNodeMemRefs(Load, {LoadMMO, StoreMMO}); 8340 8341 return SDValue(Load, 0); 8342 } 8343 case Intrinsic::amdgcn_end_cf: 8344 return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other, 8345 Op->getOperand(2), Chain), 0); 8346 8347 default: { 8348 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 8349 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) 8350 return lowerImage(Op, ImageDimIntr, DAG, true); 8351 8352 return Op; 8353 } 8354 } 8355 } 8356 8357 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args: 8358 // offset (the offset that is included in bounds checking and swizzling, to be 8359 // split between the instruction's voffset and immoffset fields) and soffset 8360 // (the offset that is excluded from bounds checking and swizzling, to go in 8361 // the instruction's soffset field). This function takes the first kind of 8362 // offset and figures out how to split it between voffset and immoffset. 8363 std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets( 8364 SDValue Offset, SelectionDAG &DAG) const { 8365 SDLoc DL(Offset); 8366 const unsigned MaxImm = 4095; 8367 SDValue N0 = Offset; 8368 ConstantSDNode *C1 = nullptr; 8369 8370 if ((C1 = dyn_cast<ConstantSDNode>(N0))) 8371 N0 = SDValue(); 8372 else if (DAG.isBaseWithConstantOffset(N0)) { 8373 C1 = cast<ConstantSDNode>(N0.getOperand(1)); 8374 N0 = N0.getOperand(0); 8375 } 8376 8377 if (C1) { 8378 unsigned ImmOffset = C1->getZExtValue(); 8379 // If the immediate value is too big for the immoffset field, put the value 8380 // and -4096 into the immoffset field so that the value that is copied/added 8381 // for the voffset field is a multiple of 4096, and it stands more chance 8382 // of being CSEd with the copy/add for another similar load/store. 
8383 // However, do not do that rounding down to a multiple of 4096 if that is a 8384 // negative number, as it appears to be illegal to have a negative offset 8385 // in the vgpr, even if adding the immediate offset makes it positive. 8386 unsigned Overflow = ImmOffset & ~MaxImm; 8387 ImmOffset -= Overflow; 8388 if ((int32_t)Overflow < 0) { 8389 Overflow += ImmOffset; 8390 ImmOffset = 0; 8391 } 8392 C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32)); 8393 if (Overflow) { 8394 auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32); 8395 if (!N0) 8396 N0 = OverflowVal; 8397 else { 8398 SDValue Ops[] = { N0, OverflowVal }; 8399 N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops); 8400 } 8401 } 8402 } 8403 if (!N0) 8404 N0 = DAG.getConstant(0, DL, MVT::i32); 8405 if (!C1) 8406 C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32)); 8407 return {N0, SDValue(C1, 0)}; 8408 } 8409 8410 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the 8411 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array 8412 // pointed to by Offsets. 8413 void SITargetLowering::setBufferOffsets(SDValue CombinedOffset, 8414 SelectionDAG &DAG, SDValue *Offsets, 8415 Align Alignment) const { 8416 SDLoc DL(CombinedOffset); 8417 if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) { 8418 uint32_t Imm = C->getZExtValue(); 8419 uint32_t SOffset, ImmOffset; 8420 if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, 8421 Alignment)) { 8422 Offsets[0] = DAG.getConstant(0, DL, MVT::i32); 8423 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); 8424 Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32); 8425 return; 8426 } 8427 } 8428 if (DAG.isBaseWithConstantOffset(CombinedOffset)) { 8429 SDValue N0 = CombinedOffset.getOperand(0); 8430 SDValue N1 = CombinedOffset.getOperand(1); 8431 uint32_t SOffset, ImmOffset; 8432 int Offset = cast<ConstantSDNode>(N1)->getSExtValue(); 8433 if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset, 8434 Subtarget, Alignment)) { 8435 Offsets[0] = N0; 8436 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); 8437 Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32); 8438 return; 8439 } 8440 } 8441 Offsets[0] = CombinedOffset; 8442 Offsets[1] = DAG.getConstant(0, DL, MVT::i32); 8443 Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32); 8444 } 8445 8446 // Handle 8 bit and 16 bit buffer loads 8447 SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG, 8448 EVT LoadVT, SDLoc DL, 8449 ArrayRef<SDValue> Ops, 8450 MemSDNode *M) const { 8451 EVT IntVT = LoadVT.changeTypeToInteger(); 8452 unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ? 
8453 AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT; 8454 8455 SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other); 8456 SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList, 8457 Ops, IntVT, 8458 M->getMemOperand()); 8459 SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad); 8460 LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal); 8461 8462 return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL); 8463 } 8464 8465 // Handle 8 bit and 16 bit buffer stores 8466 SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG, 8467 EVT VDataType, SDLoc DL, 8468 SDValue Ops[], 8469 MemSDNode *M) const { 8470 if (VDataType == MVT::f16) 8471 Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]); 8472 8473 SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]); 8474 Ops[1] = BufferStoreExt; 8475 unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE : 8476 AMDGPUISD::BUFFER_STORE_SHORT; 8477 ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9); 8478 return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType, 8479 M->getMemOperand()); 8480 } 8481 8482 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG, 8483 ISD::LoadExtType ExtType, SDValue Op, 8484 const SDLoc &SL, EVT VT) { 8485 if (VT.bitsLT(Op.getValueType())) 8486 return DAG.getNode(ISD::TRUNCATE, SL, VT, Op); 8487 8488 switch (ExtType) { 8489 case ISD::SEXTLOAD: 8490 return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op); 8491 case ISD::ZEXTLOAD: 8492 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op); 8493 case ISD::EXTLOAD: 8494 return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op); 8495 case ISD::NON_EXTLOAD: 8496 return Op; 8497 } 8498 8499 llvm_unreachable("invalid ext type"); 8500 } 8501 8502 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const { 8503 SelectionDAG &DAG = DCI.DAG; 8504 if (Ld->getAlign() < Align(4) || Ld->isDivergent()) 8505 return SDValue(); 8506 8507 // FIXME: Constant loads should all be marked invariant. 8508 unsigned AS = Ld->getAddressSpace(); 8509 if (AS != AMDGPUAS::CONSTANT_ADDRESS && 8510 AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT && 8511 (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant())) 8512 return SDValue(); 8513 8514 // Don't do this early, since it may interfere with adjacent load merging for 8515 // illegal types. We can avoid losing alignment information for exotic types 8516 // pre-legalize. 8517 EVT MemVT = Ld->getMemoryVT(); 8518 if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) || 8519 MemVT.getSizeInBits() >= 32) 8520 return SDValue(); 8521 8522 SDLoc SL(Ld); 8523 8524 assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) && 8525 "unexpected vector extload"); 8526 8527 // TODO: Drop only high part of range. 
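  // Widen to a plain i32 load and drop the range metadata, then re-apply the
  // original extension semantics below: an extend-in-reg of the memory width
  // for sign/zero-extending loads, followed by the extension or truncation
  // required by the result type.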
8528 SDValue Ptr = Ld->getBasePtr(); 8529 SDValue NewLoad = DAG.getLoad( 8530 ISD::UNINDEXED, ISD::NON_EXTLOAD, MVT::i32, SL, Ld->getChain(), Ptr, 8531 Ld->getOffset(), Ld->getPointerInfo(), MVT::i32, Ld->getAlign(), 8532 Ld->getMemOperand()->getFlags(), Ld->getAAInfo(), 8533 nullptr); // Drop ranges 8534 8535 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()); 8536 if (MemVT.isFloatingPoint()) { 8537 assert(Ld->getExtensionType() == ISD::NON_EXTLOAD && 8538 "unexpected fp extload"); 8539 TruncVT = MemVT.changeTypeToInteger(); 8540 } 8541 8542 SDValue Cvt = NewLoad; 8543 if (Ld->getExtensionType() == ISD::SEXTLOAD) { 8544 Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad, 8545 DAG.getValueType(TruncVT)); 8546 } else if (Ld->getExtensionType() == ISD::ZEXTLOAD || 8547 Ld->getExtensionType() == ISD::NON_EXTLOAD) { 8548 Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT); 8549 } else { 8550 assert(Ld->getExtensionType() == ISD::EXTLOAD); 8551 } 8552 8553 EVT VT = Ld->getValueType(0); 8554 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 8555 8556 DCI.AddToWorklist(Cvt.getNode()); 8557 8558 // We may need to handle exotic cases, such as i16->i64 extloads, so insert 8559 // the appropriate extension from the 32-bit load. 8560 Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT); 8561 DCI.AddToWorklist(Cvt.getNode()); 8562 8563 // Handle conversion back to floating point if necessary. 8564 Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt); 8565 8566 return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL); 8567 } 8568 8569 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 8570 SDLoc DL(Op); 8571 LoadSDNode *Load = cast<LoadSDNode>(Op); 8572 ISD::LoadExtType ExtType = Load->getExtensionType(); 8573 EVT MemVT = Load->getMemoryVT(); 8574 8575 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { 8576 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16)) 8577 return SDValue(); 8578 8579 // FIXME: Copied from PPC 8580 // First, load into 32 bits, then truncate to 1 bit. 8581 8582 SDValue Chain = Load->getChain(); 8583 SDValue BasePtr = Load->getBasePtr(); 8584 MachineMemOperand *MMO = Load->getMemOperand(); 8585 8586 EVT RealMemVT = (MemVT == MVT::i1) ? 
MVT::i8 : MVT::i16; 8587 8588 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, 8589 BasePtr, RealMemVT, MMO); 8590 8591 if (!MemVT.isVector()) { 8592 SDValue Ops[] = { 8593 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), 8594 NewLD.getValue(1) 8595 }; 8596 8597 return DAG.getMergeValues(Ops, DL); 8598 } 8599 8600 SmallVector<SDValue, 3> Elts; 8601 for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) { 8602 SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD, 8603 DAG.getConstant(I, DL, MVT::i32)); 8604 8605 Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt)); 8606 } 8607 8608 SDValue Ops[] = { 8609 DAG.getBuildVector(MemVT, DL, Elts), 8610 NewLD.getValue(1) 8611 }; 8612 8613 return DAG.getMergeValues(Ops, DL); 8614 } 8615 8616 if (!MemVT.isVector()) 8617 return SDValue(); 8618 8619 assert(Op.getValueType().getVectorElementType() == MVT::i32 && 8620 "Custom lowering for non-i32 vectors hasn't been implemented."); 8621 8622 Align Alignment = Load->getAlign(); 8623 unsigned AS = Load->getAddressSpace(); 8624 if (Subtarget->hasLDSMisalignedBug() && AS == AMDGPUAS::FLAT_ADDRESS && 8625 Alignment.value() < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) { 8626 return SplitVectorLoad(Op, DAG); 8627 } 8628 8629 MachineFunction &MF = DAG.getMachineFunction(); 8630 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 8631 // If there is a possibility that flat instruction access scratch memory 8632 // then we need to use the same legalization rules we use for private. 8633 if (AS == AMDGPUAS::FLAT_ADDRESS && 8634 !Subtarget->hasMultiDwordFlatScratchAddressing()) 8635 AS = MFI->hasFlatScratchInit() ? 8636 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; 8637 8638 unsigned NumElements = MemVT.getVectorNumElements(); 8639 8640 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 8641 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) { 8642 if (!Op->isDivergent() && Alignment >= Align(4) && NumElements < 32) { 8643 if (MemVT.isPow2VectorType()) 8644 return SDValue(); 8645 return WidenOrSplitVectorLoad(Op, DAG); 8646 } 8647 // Non-uniform loads will be selected to MUBUF instructions, so they 8648 // have the same legalization requirements as global and private 8649 // loads. 8650 // 8651 } 8652 8653 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 8654 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || 8655 AS == AMDGPUAS::GLOBAL_ADDRESS) { 8656 if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() && 8657 Load->isSimple() && isMemOpHasNoClobberedMemOperand(Load) && 8658 Alignment >= Align(4) && NumElements < 32) { 8659 if (MemVT.isPow2VectorType()) 8660 return SDValue(); 8661 return WidenOrSplitVectorLoad(Op, DAG); 8662 } 8663 // Non-uniform loads will be selected to MUBUF instructions, so they 8664 // have the same legalization requirements as global and private 8665 // loads. 8666 // 8667 } 8668 if (AS == AMDGPUAS::CONSTANT_ADDRESS || 8669 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || 8670 AS == AMDGPUAS::GLOBAL_ADDRESS || 8671 AS == AMDGPUAS::FLAT_ADDRESS) { 8672 if (NumElements > 4) 8673 return SplitVectorLoad(Op, DAG); 8674 // v3 loads not supported on SI. 8675 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) 8676 return WidenOrSplitVectorLoad(Op, DAG); 8677 8678 // v3 and v4 loads are supported for private and global memory. 
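    // Returning an empty SDValue keeps the original load node and lets the
    // default handling select it.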
8679 return SDValue(); 8680 } 8681 if (AS == AMDGPUAS::PRIVATE_ADDRESS) { 8682 // Depending on the setting of the private_element_size field in the 8683 // resource descriptor, we can only make private accesses up to a certain 8684 // size. 8685 switch (Subtarget->getMaxPrivateElementSize()) { 8686 case 4: { 8687 SDValue Ops[2]; 8688 std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG); 8689 return DAG.getMergeValues(Ops, DL); 8690 } 8691 case 8: 8692 if (NumElements > 2) 8693 return SplitVectorLoad(Op, DAG); 8694 return SDValue(); 8695 case 16: 8696 // Same as global/flat 8697 if (NumElements > 4) 8698 return SplitVectorLoad(Op, DAG); 8699 // v3 loads not supported on SI. 8700 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) 8701 return WidenOrSplitVectorLoad(Op, DAG); 8702 8703 return SDValue(); 8704 default: 8705 llvm_unreachable("unsupported private_element_size"); 8706 } 8707 } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { 8708 bool Fast = false; 8709 auto Flags = Load->getMemOperand()->getFlags(); 8710 if (allowsMisalignedMemoryAccessesImpl(MemVT.getSizeInBits(), AS, 8711 Load->getAlign(), Flags, &Fast) && 8712 Fast) 8713 return SDValue(); 8714 8715 if (MemVT.isVector()) 8716 return SplitVectorLoad(Op, DAG); 8717 } 8718 8719 if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), 8720 MemVT, *Load->getMemOperand())) { 8721 SDValue Ops[2]; 8722 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); 8723 return DAG.getMergeValues(Ops, DL); 8724 } 8725 8726 return SDValue(); 8727 } 8728 8729 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 8730 EVT VT = Op.getValueType(); 8731 if (VT.getSizeInBits() == 128 || VT.getSizeInBits() == 256) 8732 return splitTernaryVectorOp(Op, DAG); 8733 8734 assert(VT.getSizeInBits() == 64); 8735 8736 SDLoc DL(Op); 8737 SDValue Cond = Op.getOperand(0); 8738 8739 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 8740 SDValue One = DAG.getConstant(1, DL, MVT::i32); 8741 8742 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 8743 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 8744 8745 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 8746 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 8747 8748 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 8749 8750 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 8751 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 8752 8753 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 8754 8755 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 8756 return DAG.getNode(ISD::BITCAST, DL, VT, Res); 8757 } 8758 8759 // Catch division cases where we can use shortcuts with rcp and rsq 8760 // instructions. 8761 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, 8762 SelectionDAG &DAG) const { 8763 SDLoc SL(Op); 8764 SDValue LHS = Op.getOperand(0); 8765 SDValue RHS = Op.getOperand(1); 8766 EVT VT = Op.getValueType(); 8767 const SDNodeFlags Flags = Op->getFlags(); 8768 8769 bool AllowInaccurateRcp = Flags.hasApproximateFuncs(); 8770 8771 // Without !fpmath accuracy information, we can't do more because we don't 8772 // know exactly whether rcp is accurate enough to meet !fpmath requirement. 
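  // For example, with the 'afn' (approximate functions) fast-math flag set,
  // 1.0 / x lowers to AMDGPUISD::RCP and 1.0 / sqrt(x) to AMDGPUISD::RSQ
  // below; the general case becomes x * rcp(y).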
8773 if (!AllowInaccurateRcp) 8774 return SDValue(); 8775 8776 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 8777 if (CLHS->isExactlyValue(1.0)) { 8778 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 8779 // the CI documentation has a worst case error of 1 ulp. 8780 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 8781 // use it as long as we aren't trying to use denormals. 8782 // 8783 // v_rcp_f16 and v_rsq_f16 DO support denormals. 8784 8785 // 1.0 / sqrt(x) -> rsq(x) 8786 8787 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 8788 // error seems really high at 2^29 ULP. 8789 if (RHS.getOpcode() == ISD::FSQRT) 8790 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 8791 8792 // 1.0 / x -> rcp(x) 8793 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 8794 } 8795 8796 // Same as for 1.0, but expand the sign out of the constant. 8797 if (CLHS->isExactlyValue(-1.0)) { 8798 // -1.0 / x -> rcp (fneg x) 8799 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 8800 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); 8801 } 8802 } 8803 8804 // Turn into multiply by the reciprocal. 8805 // x / y -> x * (1.0 / y) 8806 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 8807 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags); 8808 } 8809 8810 SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op, 8811 SelectionDAG &DAG) const { 8812 SDLoc SL(Op); 8813 SDValue X = Op.getOperand(0); 8814 SDValue Y = Op.getOperand(1); 8815 EVT VT = Op.getValueType(); 8816 const SDNodeFlags Flags = Op->getFlags(); 8817 8818 bool AllowInaccurateDiv = Flags.hasApproximateFuncs() || 8819 DAG.getTarget().Options.UnsafeFPMath; 8820 if (!AllowInaccurateDiv) 8821 return SDValue(); 8822 8823 SDValue NegY = DAG.getNode(ISD::FNEG, SL, VT, Y); 8824 SDValue One = DAG.getConstantFP(1.0, SL, VT); 8825 8826 SDValue R = DAG.getNode(AMDGPUISD::RCP, SL, VT, Y); 8827 SDValue Tmp0 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One); 8828 8829 R = DAG.getNode(ISD::FMA, SL, VT, Tmp0, R, R); 8830 SDValue Tmp1 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One); 8831 R = DAG.getNode(ISD::FMA, SL, VT, Tmp1, R, R); 8832 SDValue Ret = DAG.getNode(ISD::FMUL, SL, VT, X, R); 8833 SDValue Tmp2 = DAG.getNode(ISD::FMA, SL, VT, NegY, Ret, X); 8834 return DAG.getNode(ISD::FMA, SL, VT, Tmp2, R, Ret); 8835 } 8836 8837 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 8838 EVT VT, SDValue A, SDValue B, SDValue GlueChain, 8839 SDNodeFlags Flags) { 8840 if (GlueChain->getNumValues() <= 1) { 8841 return DAG.getNode(Opcode, SL, VT, A, B, Flags); 8842 } 8843 8844 assert(GlueChain->getNumValues() == 3); 8845 8846 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 8847 switch (Opcode) { 8848 default: llvm_unreachable("no chain equivalent for opcode"); 8849 case ISD::FMUL: 8850 Opcode = AMDGPUISD::FMUL_W_CHAIN; 8851 break; 8852 } 8853 8854 return DAG.getNode(Opcode, SL, VTList, 8855 {GlueChain.getValue(1), A, B, GlueChain.getValue(2)}, 8856 Flags); 8857 } 8858 8859 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 8860 EVT VT, SDValue A, SDValue B, SDValue C, 8861 SDValue GlueChain, SDNodeFlags Flags) { 8862 if (GlueChain->getNumValues() <= 1) { 8863 return DAG.getNode(Opcode, SL, VT, {A, B, C}, Flags); 8864 } 8865 8866 assert(GlueChain->getNumValues() == 3); 8867 8868 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 8869 switch (Opcode) { 8870 default: llvm_unreachable("no 
chain equivalent for opcode"); 8871 case ISD::FMA: 8872 Opcode = AMDGPUISD::FMA_W_CHAIN; 8873 break; 8874 } 8875 8876 return DAG.getNode(Opcode, SL, VTList, 8877 {GlueChain.getValue(1), A, B, C, GlueChain.getValue(2)}, 8878 Flags); 8879 } 8880 8881 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { 8882 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 8883 return FastLowered; 8884 8885 SDLoc SL(Op); 8886 SDValue Src0 = Op.getOperand(0); 8887 SDValue Src1 = Op.getOperand(1); 8888 8889 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); 8890 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); 8891 8892 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); 8893 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); 8894 8895 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); 8896 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); 8897 8898 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); 8899 } 8900 8901 // Faster 2.5 ULP division that does not support denormals. 8902 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { 8903 SDLoc SL(Op); 8904 SDValue LHS = Op.getOperand(1); 8905 SDValue RHS = Op.getOperand(2); 8906 8907 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); 8908 8909 const APFloat K0Val(BitsToFloat(0x6f800000)); 8910 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); 8911 8912 const APFloat K1Val(BitsToFloat(0x2f800000)); 8913 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); 8914 8915 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 8916 8917 EVT SetCCVT = 8918 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 8919 8920 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); 8921 8922 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); 8923 8924 // TODO: Should this propagate fast-math-flags? 8925 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); 8926 8927 // rcp does not support denormals. 8928 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); 8929 8930 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); 8931 8932 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); 8933 } 8934 8935 // Returns immediate value for setting the F32 denorm mode when using the 8936 // S_DENORM_MODE instruction. 8937 static SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG, 8938 const SDLoc &SL, const GCNSubtarget *ST) { 8939 assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE"); 8940 int DPDenormModeDefault = hasFP64FP16Denormals(DAG.getMachineFunction()) 8941 ? FP_DENORM_FLUSH_NONE 8942 : FP_DENORM_FLUSH_IN_FLUSH_OUT; 8943 8944 int Mode = SPDenormMode | (DPDenormModeDefault << 2); 8945 return DAG.getTargetConstant(Mode, SL, MVT::i32); 8946 } 8947 8948 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 8949 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 8950 return FastLowered; 8951 8952 // The selection matcher assumes anything with a chain selecting to a 8953 // mayRaiseFPException machine instruction. Since we're introducing a chain 8954 // here, we need to explicitly report nofpexcept for the regular fdiv 8955 // lowering. 
8956 SDNodeFlags Flags = Op->getFlags(); 8957 Flags.setNoFPExcept(true); 8958 8959 SDLoc SL(Op); 8960 SDValue LHS = Op.getOperand(0); 8961 SDValue RHS = Op.getOperand(1); 8962 8963 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 8964 8965 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); 8966 8967 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 8968 {RHS, RHS, LHS}, Flags); 8969 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 8970 {LHS, RHS, LHS}, Flags); 8971 8972 // Denominator is scaled to not be denormal, so using rcp is ok. 8973 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, 8974 DenominatorScaled, Flags); 8975 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, 8976 DenominatorScaled, Flags); 8977 8978 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | 8979 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | 8980 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); 8981 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i32); 8982 8983 const bool HasFP32Denormals = hasFP32Denormals(DAG.getMachineFunction()); 8984 8985 if (!HasFP32Denormals) { 8986 // Note we can't use the STRICT_FMA/STRICT_FMUL for the non-strict FDIV 8987 // lowering. The chain dependence is insufficient, and we need glue. We do 8988 // not need the glue variants in a strictfp function. 8989 8990 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); 8991 8992 SDNode *EnableDenorm; 8993 if (Subtarget->hasDenormModeInst()) { 8994 const SDValue EnableDenormValue = 8995 getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget); 8996 8997 EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs, 8998 DAG.getEntryNode(), EnableDenormValue).getNode(); 8999 } else { 9000 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, 9001 SL, MVT::i32); 9002 EnableDenorm = 9003 DAG.getMachineNode(AMDGPU::S_SETREG_B32, SL, BindParamVTs, 9004 {EnableDenormValue, BitField, DAG.getEntryNode()}); 9005 } 9006 9007 SDValue Ops[3] = { 9008 NegDivScale0, 9009 SDValue(EnableDenorm, 0), 9010 SDValue(EnableDenorm, 1) 9011 }; 9012 9013 NegDivScale0 = DAG.getMergeValues(Ops, SL); 9014 } 9015 9016 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, 9017 ApproxRcp, One, NegDivScale0, Flags); 9018 9019 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, 9020 ApproxRcp, Fma0, Flags); 9021 9022 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, 9023 Fma1, Fma1, Flags); 9024 9025 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, 9026 NumeratorScaled, Mul, Flags); 9027 9028 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, 9029 Fma2, Fma1, Mul, Fma2, Flags); 9030 9031 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, 9032 NumeratorScaled, Fma3, Flags); 9033 9034 if (!HasFP32Denormals) { 9035 SDNode *DisableDenorm; 9036 if (Subtarget->hasDenormModeInst()) { 9037 const SDValue DisableDenormValue = 9038 getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget); 9039 9040 DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other, 9041 Fma4.getValue(1), DisableDenormValue, 9042 Fma4.getValue(2)).getNode(); 9043 } else { 9044 const SDValue DisableDenormValue = 9045 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); 9046 9047 DisableDenorm = DAG.getMachineNode( 9048 AMDGPU::S_SETREG_B32, SL, MVT::Other, 9049 {DisableDenormValue, BitField, Fma4.getValue(1), Fma4.getValue(2)}); 9050 } 9051 9052 
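    // Merge the denormal-mode restore into the DAG root with a TokenFactor so
    // that the mode switch is ordered with the rest of the function's side
    // effects and is not dropped as dead.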
    SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                                      SDValue(DisableDenorm, 0), DAG.getRoot());
    DAG.setRoot(OutputChain);
  }

  SDValue Scale = NumeratorScaled.getValue(1);
  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
                             {Fma4, Fma1, Fma3, Scale}, Flags);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS, Flags);
}

SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV64(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);

  SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);

  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);

  SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);

  SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);

  SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);

  SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);

  SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);

  SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);

  SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
                             NegDivScale0, Mul, DivScale1);

  SDValue Scale;

  if (!Subtarget->hasUsableDivScaleConditionOutput()) {
    // Work around a hardware bug on SI where the condition output from
    // div_scale is not usable.

    const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);

    // Figure out which scale to use for div_fmas.
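    // The comparisons below reconstruct the missing condition: the high 32
    // bits of each div_scale result are compared against the high bits of the
    // original numerator and denominator, and the two tests are xor'ed to form
    // the scale input of DIV_FMAS.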
9105 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 9106 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); 9107 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); 9108 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); 9109 9110 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); 9111 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); 9112 9113 SDValue Scale0Hi 9114 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); 9115 SDValue Scale1Hi 9116 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); 9117 9118 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); 9119 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); 9120 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); 9121 } else { 9122 Scale = DivScale1.getValue(1); 9123 } 9124 9125 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, 9126 Fma4, Fma3, Mul, Scale); 9127 9128 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); 9129 } 9130 9131 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { 9132 EVT VT = Op.getValueType(); 9133 9134 if (VT == MVT::f32) 9135 return LowerFDIV32(Op, DAG); 9136 9137 if (VT == MVT::f64) 9138 return LowerFDIV64(Op, DAG); 9139 9140 if (VT == MVT::f16) 9141 return LowerFDIV16(Op, DAG); 9142 9143 llvm_unreachable("Unexpected type for fdiv"); 9144 } 9145 9146 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 9147 SDLoc DL(Op); 9148 StoreSDNode *Store = cast<StoreSDNode>(Op); 9149 EVT VT = Store->getMemoryVT(); 9150 9151 if (VT == MVT::i1) { 9152 return DAG.getTruncStore(Store->getChain(), DL, 9153 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), 9154 Store->getBasePtr(), MVT::i1, Store->getMemOperand()); 9155 } 9156 9157 assert(VT.isVector() && 9158 Store->getValue().getValueType().getScalarType() == MVT::i32); 9159 9160 unsigned AS = Store->getAddressSpace(); 9161 if (Subtarget->hasLDSMisalignedBug() && 9162 AS == AMDGPUAS::FLAT_ADDRESS && 9163 Store->getAlign().value() < VT.getStoreSize() && VT.getSizeInBits() > 32) { 9164 return SplitVectorStore(Op, DAG); 9165 } 9166 9167 MachineFunction &MF = DAG.getMachineFunction(); 9168 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 9169 // If there is a possibility that flat instruction access scratch memory 9170 // then we need to use the same legalization rules we use for private. 9171 if (AS == AMDGPUAS::FLAT_ADDRESS && 9172 !Subtarget->hasMultiDwordFlatScratchAddressing()) 9173 AS = MFI->hasFlatScratchInit() ? 9174 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; 9175 9176 unsigned NumElements = VT.getVectorNumElements(); 9177 if (AS == AMDGPUAS::GLOBAL_ADDRESS || 9178 AS == AMDGPUAS::FLAT_ADDRESS) { 9179 if (NumElements > 4) 9180 return SplitVectorStore(Op, DAG); 9181 // v3 stores not supported on SI. 
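    // Split rather than widen the store; widening would write bytes past the
    // end of the original store.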
9182 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) 9183 return SplitVectorStore(Op, DAG); 9184 9185 if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), 9186 VT, *Store->getMemOperand())) 9187 return expandUnalignedStore(Store, DAG); 9188 9189 return SDValue(); 9190 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { 9191 switch (Subtarget->getMaxPrivateElementSize()) { 9192 case 4: 9193 return scalarizeVectorStore(Store, DAG); 9194 case 8: 9195 if (NumElements > 2) 9196 return SplitVectorStore(Op, DAG); 9197 return SDValue(); 9198 case 16: 9199 if (NumElements > 4 || 9200 (NumElements == 3 && !Subtarget->enableFlatScratch())) 9201 return SplitVectorStore(Op, DAG); 9202 return SDValue(); 9203 default: 9204 llvm_unreachable("unsupported private_element_size"); 9205 } 9206 } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { 9207 bool Fast = false; 9208 auto Flags = Store->getMemOperand()->getFlags(); 9209 if (allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AS, 9210 Store->getAlign(), Flags, &Fast) && 9211 Fast) 9212 return SDValue(); 9213 9214 if (VT.isVector()) 9215 return SplitVectorStore(Op, DAG); 9216 9217 return expandUnalignedStore(Store, DAG); 9218 } 9219 9220 // Probably an invalid store. If so we'll end up emitting a selection error. 9221 return SDValue(); 9222 } 9223 9224 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { 9225 SDLoc DL(Op); 9226 EVT VT = Op.getValueType(); 9227 SDValue Arg = Op.getOperand(0); 9228 SDValue TrigVal; 9229 9230 // Propagate fast-math flags so that the multiply we introduce can be folded 9231 // if Arg is already the result of a multiply by constant. 9232 auto Flags = Op->getFlags(); 9233 9234 SDValue OneOver2Pi = DAG.getConstantFP(0.5 * numbers::inv_pi, DL, VT); 9235 9236 if (Subtarget->hasTrigReducedRange()) { 9237 SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags); 9238 TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal, Flags); 9239 } else { 9240 TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags); 9241 } 9242 9243 switch (Op.getOpcode()) { 9244 case ISD::FCOS: 9245 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal, Flags); 9246 case ISD::FSIN: 9247 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal, Flags); 9248 default: 9249 llvm_unreachable("Wrong trig opcode"); 9250 } 9251 } 9252 9253 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 9254 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); 9255 assert(AtomicNode->isCompareAndSwap()); 9256 unsigned AS = AtomicNode->getAddressSpace(); 9257 9258 // No custom lowering required for local address space 9259 if (!AMDGPU::isFlatGlobalAddrSpace(AS)) 9260 return Op; 9261 9262 // Non-local address space requires custom lowering for atomic compare 9263 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 9264 SDLoc DL(Op); 9265 SDValue ChainIn = Op.getOperand(0); 9266 SDValue Addr = Op.getOperand(1); 9267 SDValue Old = Op.getOperand(2); 9268 SDValue New = Op.getOperand(3); 9269 EVT VT = Op.getValueType(); 9270 MVT SimpleVT = VT.getSimpleVT(); 9271 MVT VecType = MVT::getVectorVT(SimpleVT, 2); 9272 9273 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); 9274 SDValue Ops[] = { ChainIn, Addr, NewOld }; 9275 9276 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), 9277 Ops, VT, AtomicNode->getMemOperand()); 9278 } 9279 9280 
//===----------------------------------------------------------------------===// 9281 // Custom DAG optimizations 9282 //===----------------------------------------------------------------------===// 9283 9284 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, 9285 DAGCombinerInfo &DCI) const { 9286 EVT VT = N->getValueType(0); 9287 EVT ScalarVT = VT.getScalarType(); 9288 if (ScalarVT != MVT::f32 && ScalarVT != MVT::f16) 9289 return SDValue(); 9290 9291 SelectionDAG &DAG = DCI.DAG; 9292 SDLoc DL(N); 9293 9294 SDValue Src = N->getOperand(0); 9295 EVT SrcVT = Src.getValueType(); 9296 9297 // TODO: We could try to match extracting the higher bytes, which would be 9298 // easier if i8 vectors weren't promoted to i32 vectors, particularly after 9299 // types are legalized. v4i8 -> v4f32 is probably the only case to worry 9300 // about in practice. 9301 if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) { 9302 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { 9303 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, MVT::f32, Src); 9304 DCI.AddToWorklist(Cvt.getNode()); 9305 9306 // For the f16 case, fold to a cast to f32 and then cast back to f16. 9307 if (ScalarVT != MVT::f32) { 9308 Cvt = DAG.getNode(ISD::FP_ROUND, DL, VT, Cvt, 9309 DAG.getTargetConstant(0, DL, MVT::i32)); 9310 } 9311 return Cvt; 9312 } 9313 } 9314 9315 return SDValue(); 9316 } 9317 9318 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) 9319 9320 // This is a variant of 9321 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), 9322 // 9323 // The normal DAG combiner will do this, but only if the add has one use since 9324 // that would increase the number of instructions. 9325 // 9326 // This prevents us from seeing a constant offset that can be folded into a 9327 // memory instruction's addressing mode. If we know the resulting add offset of 9328 // a pointer can be folded into an addressing offset, we can replace the pointer 9329 // operand with the add of new constant offset. This eliminates one of the uses, 9330 // and may allow the remaining use to also be simplified. 9331 // 9332 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, 9333 unsigned AddrSpace, 9334 EVT MemVT, 9335 DAGCombinerInfo &DCI) const { 9336 SDValue N0 = N->getOperand(0); 9337 SDValue N1 = N->getOperand(1); 9338 9339 // We only do this to handle cases where it's profitable when there are 9340 // multiple uses of the add, so defer to the standard combine. 9341 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) || 9342 N0->hasOneUse()) 9343 return SDValue(); 9344 9345 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); 9346 if (!CN1) 9347 return SDValue(); 9348 9349 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 9350 if (!CAdd) 9351 return SDValue(); 9352 9353 // If the resulting offset is too large, we can't fold it into the addressing 9354 // mode offset. 
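  // The isLegalAddressingMode query below checks whether a base register plus
  // an immediate of this size is valid for the memory type and address space;
  // if it is not, the (shl (add x, c1), c2) form is left untouched.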
9355 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 9356 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext()); 9357 9358 AddrMode AM; 9359 AM.HasBaseReg = true; 9360 AM.BaseOffs = Offset.getSExtValue(); 9361 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace)) 9362 return SDValue(); 9363 9364 SelectionDAG &DAG = DCI.DAG; 9365 SDLoc SL(N); 9366 EVT VT = N->getValueType(0); 9367 9368 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 9369 SDValue COffset = DAG.getConstant(Offset, SL, VT); 9370 9371 SDNodeFlags Flags; 9372 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() && 9373 (N0.getOpcode() == ISD::OR || 9374 N0->getFlags().hasNoUnsignedWrap())); 9375 9376 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags); 9377 } 9378 9379 /// MemSDNode::getBasePtr() does not work for intrinsics, which needs to offset 9380 /// by the chain and intrinsic ID. Theoretically we would also need to check the 9381 /// specific intrinsic, but they all place the pointer operand first. 9382 static unsigned getBasePtrIndex(const MemSDNode *N) { 9383 switch (N->getOpcode()) { 9384 case ISD::STORE: 9385 case ISD::INTRINSIC_W_CHAIN: 9386 case ISD::INTRINSIC_VOID: 9387 return 2; 9388 default: 9389 return 1; 9390 } 9391 } 9392 9393 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, 9394 DAGCombinerInfo &DCI) const { 9395 SelectionDAG &DAG = DCI.DAG; 9396 SDLoc SL(N); 9397 9398 unsigned PtrIdx = getBasePtrIndex(N); 9399 SDValue Ptr = N->getOperand(PtrIdx); 9400 9401 // TODO: We could also do this for multiplies. 9402 if (Ptr.getOpcode() == ISD::SHL) { 9403 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(), 9404 N->getMemoryVT(), DCI); 9405 if (NewPtr) { 9406 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end()); 9407 9408 NewOps[PtrIdx] = NewPtr; 9409 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); 9410 } 9411 } 9412 9413 return SDValue(); 9414 } 9415 9416 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { 9417 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || 9418 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || 9419 (Opc == ISD::XOR && Val == 0); 9420 } 9421 9422 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This 9423 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit 9424 // integer combine opportunities since most 64-bit operations are decomposed 9425 // this way. TODO: We won't want this for SALU especially if it is an inline 9426 // immediate. 9427 SDValue SITargetLowering::splitBinaryBitConstantOp( 9428 DAGCombinerInfo &DCI, 9429 const SDLoc &SL, 9430 unsigned Opc, SDValue LHS, 9431 const ConstantSDNode *CRHS) const { 9432 uint64_t Val = CRHS->getZExtValue(); 9433 uint32_t ValLo = Lo_32(Val); 9434 uint32_t ValHi = Hi_32(Val); 9435 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 9436 9437 if ((bitOpWithConstantIsReducible(Opc, ValLo) || 9438 bitOpWithConstantIsReducible(Opc, ValHi)) || 9439 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { 9440 // If we need to materialize a 64-bit immediate, it will be split up later 9441 // anyway. Avoid creating the harder to understand 64-bit immediate 9442 // materialization. 
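// For example, (and x:i64, 0x00000000ffffffff) has ValHi == 0, which
// bitOpWithConstantIsReducible recognizes: after the split the high half
// becomes the constant 0 and the low half is just the low half of x, so no
// 64-bit immediate has to be materialized.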
9443 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); 9444 } 9445 9446 return SDValue(); 9447 } 9448 9449 // Returns true if argument is a boolean value which is not serialized into 9450 // memory or argument and does not require v_cndmask_b32 to be deserialized. 9451 static bool isBoolSGPR(SDValue V) { 9452 if (V.getValueType() != MVT::i1) 9453 return false; 9454 switch (V.getOpcode()) { 9455 default: 9456 break; 9457 case ISD::SETCC: 9458 case AMDGPUISD::FP_CLASS: 9459 return true; 9460 case ISD::AND: 9461 case ISD::OR: 9462 case ISD::XOR: 9463 return isBoolSGPR(V.getOperand(0)) && isBoolSGPR(V.getOperand(1)); 9464 } 9465 return false; 9466 } 9467 9468 // If a constant has all zeroes or all ones within each byte return it. 9469 // Otherwise return 0. 9470 static uint32_t getConstantPermuteMask(uint32_t C) { 9471 // 0xff for any zero byte in the mask 9472 uint32_t ZeroByteMask = 0; 9473 if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff; 9474 if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00; 9475 if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000; 9476 if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000; 9477 uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte 9478 if ((NonZeroByteMask & C) != NonZeroByteMask) 9479 return 0; // Partial bytes selected. 9480 return C; 9481 } 9482 9483 // Check if a node selects whole bytes from its operand 0 starting at a byte 9484 // boundary while masking the rest. Returns select mask as in the v_perm_b32 9485 // or -1 if not succeeded. 9486 // Note byte select encoding: 9487 // value 0-3 selects corresponding source byte; 9488 // value 0xc selects zero; 9489 // value 0xff selects 0xff. 9490 static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) { 9491 assert(V.getValueSizeInBits() == 32); 9492 9493 if (V.getNumOperands() != 2) 9494 return ~0; 9495 9496 ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1)); 9497 if (!N1) 9498 return ~0; 9499 9500 uint32_t C = N1->getZExtValue(); 9501 9502 switch (V.getOpcode()) { 9503 default: 9504 break; 9505 case ISD::AND: 9506 if (uint32_t ConstMask = getConstantPermuteMask(C)) { 9507 return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask); 9508 } 9509 break; 9510 9511 case ISD::OR: 9512 if (uint32_t ConstMask = getConstantPermuteMask(C)) { 9513 return (0x03020100 & ~ConstMask) | ConstMask; 9514 } 9515 break; 9516 9517 case ISD::SHL: 9518 if (C % 8) 9519 return ~0; 9520 9521 return uint32_t((0x030201000c0c0c0cull << C) >> 32); 9522 9523 case ISD::SRL: 9524 if (C % 8) 9525 return ~0; 9526 9527 return uint32_t(0x0c0c0c0c03020100ull >> C); 9528 } 9529 9530 return ~0; 9531 } 9532 9533 SDValue SITargetLowering::performAndCombine(SDNode *N, 9534 DAGCombinerInfo &DCI) const { 9535 if (DCI.isBeforeLegalize()) 9536 return SDValue(); 9537 9538 SelectionDAG &DAG = DCI.DAG; 9539 EVT VT = N->getValueType(0); 9540 SDValue LHS = N->getOperand(0); 9541 SDValue RHS = N->getOperand(1); 9542 9543 9544 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 9545 if (VT == MVT::i64 && CRHS) { 9546 if (SDValue Split 9547 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) 9548 return Split; 9549 } 9550 9551 if (CRHS && VT == MVT::i32) { 9552 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb 9553 // nb = number of trailing zeroes in mask 9554 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass, 9555 // given that we are selecting 8 or 16 bit fields starting at byte boundary. 
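// Worked example: (and (srl x, 8), 0xff00) gives Mask = 0xff00, Bits = 8,
// Shift = 8, NB = 8 and Offset = 16, so the node becomes
// (shl (AssertZext i8 (bfe x, 16, 8)), 8), i.e. bits [23:16] of x placed at
// byte 1 of the result.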
9556 uint64_t Mask = CRHS->getZExtValue(); 9557 unsigned Bits = countPopulation(Mask); 9558 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && 9559 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { 9560 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { 9561 unsigned Shift = CShift->getZExtValue(); 9562 unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); 9563 unsigned Offset = NB + Shift; 9564 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary. 9565 SDLoc SL(N); 9566 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, 9567 LHS->getOperand(0), 9568 DAG.getConstant(Offset, SL, MVT::i32), 9569 DAG.getConstant(Bits, SL, MVT::i32)); 9570 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); 9571 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, 9572 DAG.getValueType(NarrowVT)); 9573 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, 9574 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); 9575 return Shl; 9576 } 9577 } 9578 } 9579 9580 // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) 9581 if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM && 9582 isa<ConstantSDNode>(LHS.getOperand(2))) { 9583 uint32_t Sel = getConstantPermuteMask(Mask); 9584 if (!Sel) 9585 return SDValue(); 9586 9587 // Select 0xc for all zero bytes 9588 Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c); 9589 SDLoc DL(N); 9590 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), 9591 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); 9592 } 9593 } 9594 9595 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 9596 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 9597 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { 9598 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 9599 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 9600 9601 SDValue X = LHS.getOperand(0); 9602 SDValue Y = RHS.getOperand(0); 9603 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 9604 return SDValue(); 9605 9606 if (LCC == ISD::SETO) { 9607 if (X != LHS.getOperand(1)) 9608 return SDValue(); 9609 9610 if (RCC == ISD::SETUNE) { 9611 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 9612 if (!C1 || !C1->isInfinity() || C1->isNegative()) 9613 return SDValue(); 9614 9615 const uint32_t Mask = SIInstrFlags::N_NORMAL | 9616 SIInstrFlags::N_SUBNORMAL | 9617 SIInstrFlags::N_ZERO | 9618 SIInstrFlags::P_ZERO | 9619 SIInstrFlags::P_SUBNORMAL | 9620 SIInstrFlags::P_NORMAL; 9621 9622 static_assert(((~(SIInstrFlags::S_NAN | 9623 SIInstrFlags::Q_NAN | 9624 SIInstrFlags::N_INFINITY | 9625 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 9626 "mask not equal"); 9627 9628 SDLoc DL(N); 9629 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 9630 X, DAG.getConstant(Mask, DL, MVT::i32)); 9631 } 9632 } 9633 } 9634 9635 if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS) 9636 std::swap(LHS, RHS); 9637 9638 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS && 9639 RHS.hasOneUse()) { 9640 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 9641 // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan) 9642 // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan) 9643 const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 9644 if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask && 9645 (RHS.getOperand(0) == 
LHS.getOperand(0) &&
9646 LHS.getOperand(0) == LHS.getOperand(1))) {
9647 const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
9648 unsigned NewMask = LCC == ISD::SETO ?
9649 Mask->getZExtValue() & ~OrdMask :
9650 Mask->getZExtValue() & OrdMask;
9651
9652 SDLoc DL(N);
9653 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
9654 DAG.getConstant(NewMask, DL, MVT::i32));
9655 }
9656 }
9657
9658 if (VT == MVT::i32 &&
9659 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
9660 // and x, (sext cc from i1) => select cc, x, 0
9661 if (RHS.getOpcode() != ISD::SIGN_EXTEND)
9662 std::swap(LHS, RHS);
9663 if (isBoolSGPR(RHS.getOperand(0)))
9664 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
9665 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
9666 }
9667
9668 // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
9669 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
9670 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
9671 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) {
9672 uint32_t LHSMask = getPermuteMask(DAG, LHS);
9673 uint32_t RHSMask = getPermuteMask(DAG, RHS);
9674 if (LHSMask != ~0u && RHSMask != ~0u) {
9675 // Canonicalize the expression in an attempt to have fewer unique masks
9676 // and therefore fewer registers used to hold the masks.
9677 if (LHSMask > RHSMask) {
9678 std::swap(LHSMask, RHSMask);
9679 std::swap(LHS, RHS);
9680 }
9681
9682 // Mark with 0xc each lane actually taken from the source operand: zero lanes
9683 // already hold 0xc, 0xff lanes hold 0xff, and real lanes hold selectors 0-3.
9684 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
9685 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
9686
9687 // Check if we need to combine values from two sources within a byte.
9688 if (!(LHSUsedLanes & RHSUsedLanes) &&
9689 // If one source selects the high word and the other the low word, keep it for SDWA.
9690 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
9691 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
9692 // In each byte, each mask holds either a selector in the 0-3 range, 0xff
9693 // (select 0xff) or 0x0c (select zero); since the used lanes do not overlap,
9694 // at most one mask holds a real selector per byte. ANDing the masks keeps
9695 // that selector when the other mask holds 0xff, but any byte where either
9696 // mask holds 0x0c must be forced back to 0x0c.
9697 uint32_t Mask = LHSMask & RHSMask;
9698 for (unsigned I = 0; I < 32; I += 8) {
9699 uint32_t ByteSel = 0xffu << I;
9700 if ((LHSMask & ByteSel) == (0x0cu << I) || (RHSMask & ByteSel) == (0x0cu << I))
9701 Mask = (Mask & ~ByteSel) | (0x0cu << I);
9702 }
9703
9704 // Add 4 to each active LHS lane. It will not affect any existing 0xff
9705 // or 0x0c.
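// In the PERM node built below, LHS.getOperand(0) becomes the first source
// operand; v_perm_b32 selector values 4-7 address bytes of the first source
// and 0-3 address bytes of the second, hence the +4 on the LHS lanes.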
9706 uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404); 9707 SDLoc DL(N); 9708 9709 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, 9710 LHS.getOperand(0), RHS.getOperand(0), 9711 DAG.getConstant(Sel, DL, MVT::i32)); 9712 } 9713 } 9714 } 9715 9716 return SDValue(); 9717 } 9718 9719 SDValue SITargetLowering::performOrCombine(SDNode *N, 9720 DAGCombinerInfo &DCI) const { 9721 SelectionDAG &DAG = DCI.DAG; 9722 SDValue LHS = N->getOperand(0); 9723 SDValue RHS = N->getOperand(1); 9724 9725 EVT VT = N->getValueType(0); 9726 if (VT == MVT::i1) { 9727 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 9728 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 9729 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 9730 SDValue Src = LHS.getOperand(0); 9731 if (Src != RHS.getOperand(0)) 9732 return SDValue(); 9733 9734 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 9735 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 9736 if (!CLHS || !CRHS) 9737 return SDValue(); 9738 9739 // Only 10 bits are used. 9740 static const uint32_t MaxMask = 0x3ff; 9741 9742 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; 9743 SDLoc DL(N); 9744 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 9745 Src, DAG.getConstant(NewMask, DL, MVT::i32)); 9746 } 9747 9748 return SDValue(); 9749 } 9750 9751 // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) 9752 if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() && 9753 LHS.getOpcode() == AMDGPUISD::PERM && 9754 isa<ConstantSDNode>(LHS.getOperand(2))) { 9755 uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1)); 9756 if (!Sel) 9757 return SDValue(); 9758 9759 Sel |= LHS.getConstantOperandVal(2); 9760 SDLoc DL(N); 9761 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), 9762 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); 9763 } 9764 9765 // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) 9766 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 9767 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && 9768 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) { 9769 uint32_t LHSMask = getPermuteMask(DAG, LHS); 9770 uint32_t RHSMask = getPermuteMask(DAG, RHS); 9771 if (LHSMask != ~0u && RHSMask != ~0u) { 9772 // Canonicalize the expression in an attempt to have fewer unique masks 9773 // and therefore fewer registers used to hold the masks. 9774 if (LHSMask > RHSMask) { 9775 std::swap(LHSMask, RHSMask); 9776 std::swap(LHS, RHS); 9777 } 9778 9779 // Select 0xc for each lane used from source operand. Zero has 0xc mask 9780 // set, 0xff have 0xff in the mask, actual lanes are in the 0-3 range. 9781 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; 9782 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; 9783 9784 // Check of we need to combine values from two sources within a byte. 9785 if (!(LHSUsedLanes & RHSUsedLanes) && 9786 // If we select high and lower word keep it for SDWA. 9787 // TODO: teach SDWA to work with v_perm_b32 and remove the check. 9788 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { 9789 // Kill zero bytes selected by other mask. Zero value is 0xc. 
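// Worked example: or (and x, 0x000000ff), (shl y, 24) yields the masks
// 0x0c0c0c00 and 0x000c0c0c; after canonicalization and the adjustments
// below, Sel becomes 0x040c0c00, i.e. a v_perm_b32 of (y, x) that places
// byte 0 of y into byte 3 of the result, byte 0 of x into byte 0, and zeros
// elsewhere, which is exactly the value the original or computes.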
9790 LHSMask &= ~RHSUsedLanes; 9791 RHSMask &= ~LHSUsedLanes; 9792 // Add 4 to each active LHS lane 9793 LHSMask |= LHSUsedLanes & 0x04040404; 9794 // Combine masks 9795 uint32_t Sel = LHSMask | RHSMask; 9796 SDLoc DL(N); 9797 9798 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, 9799 LHS.getOperand(0), RHS.getOperand(0), 9800 DAG.getConstant(Sel, DL, MVT::i32)); 9801 } 9802 } 9803 } 9804 9805 if (VT != MVT::i64 || DCI.isBeforeLegalizeOps()) 9806 return SDValue(); 9807 9808 // TODO: This could be a generic combine with a predicate for extracting the 9809 // high half of an integer being free. 9810 9811 // (or i64:x, (zero_extend i32:y)) -> 9812 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) 9813 if (LHS.getOpcode() == ISD::ZERO_EXTEND && 9814 RHS.getOpcode() != ISD::ZERO_EXTEND) 9815 std::swap(LHS, RHS); 9816 9817 if (RHS.getOpcode() == ISD::ZERO_EXTEND) { 9818 SDValue ExtSrc = RHS.getOperand(0); 9819 EVT SrcVT = ExtSrc.getValueType(); 9820 if (SrcVT == MVT::i32) { 9821 SDLoc SL(N); 9822 SDValue LowLHS, HiBits; 9823 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); 9824 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); 9825 9826 DCI.AddToWorklist(LowOr.getNode()); 9827 DCI.AddToWorklist(HiBits.getNode()); 9828 9829 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 9830 LowOr, HiBits); 9831 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 9832 } 9833 } 9834 9835 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 9836 if (CRHS) { 9837 if (SDValue Split 9838 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, 9839 N->getOperand(0), CRHS)) 9840 return Split; 9841 } 9842 9843 return SDValue(); 9844 } 9845 9846 SDValue SITargetLowering::performXorCombine(SDNode *N, 9847 DAGCombinerInfo &DCI) const { 9848 if (SDValue RV = reassociateScalarOps(N, DCI.DAG)) 9849 return RV; 9850 9851 EVT VT = N->getValueType(0); 9852 if (VT != MVT::i64) 9853 return SDValue(); 9854 9855 SDValue LHS = N->getOperand(0); 9856 SDValue RHS = N->getOperand(1); 9857 9858 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); 9859 if (CRHS) { 9860 if (SDValue Split 9861 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) 9862 return Split; 9863 } 9864 9865 return SDValue(); 9866 } 9867 9868 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, 9869 DAGCombinerInfo &DCI) const { 9870 if (!Subtarget->has16BitInsts() || 9871 DCI.getDAGCombineLevel() < AfterLegalizeDAG) 9872 return SDValue(); 9873 9874 EVT VT = N->getValueType(0); 9875 if (VT != MVT::i32) 9876 return SDValue(); 9877 9878 SDValue Src = N->getOperand(0); 9879 if (Src.getValueType() != MVT::i16) 9880 return SDValue(); 9881 9882 return SDValue(); 9883 } 9884 9885 SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N, 9886 DAGCombinerInfo &DCI) 9887 const { 9888 SDValue Src = N->getOperand(0); 9889 auto *VTSign = cast<VTSDNode>(N->getOperand(1)); 9890 9891 if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE && 9892 VTSign->getVT() == MVT::i8) || 9893 (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT && 9894 VTSign->getVT() == MVT::i16)) && 9895 Src.hasOneUse()) { 9896 auto *M = cast<MemSDNode>(Src); 9897 SDValue Ops[] = { 9898 Src.getOperand(0), // Chain 9899 Src.getOperand(1), // rsrc 9900 Src.getOperand(2), // vindex 9901 Src.getOperand(3), // voffset 9902 Src.getOperand(4), // soffset 9903 Src.getOperand(5), // offset 9904 Src.getOperand(6), 9905 Src.getOperand(7) 9906 }; 9907 // replace with BUFFER_LOAD_BYTE/SHORT 9908 SDVTList ResList = 
DCI.DAG.getVTList(MVT::i32, 9909 Src.getOperand(0).getValueType()); 9910 unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ? 9911 AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT; 9912 SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N), 9913 ResList, 9914 Ops, M->getMemoryVT(), 9915 M->getMemOperand()); 9916 return DCI.DAG.getMergeValues({BufferLoadSignExt, 9917 BufferLoadSignExt.getValue(1)}, SDLoc(N)); 9918 } 9919 return SDValue(); 9920 } 9921 9922 SDValue SITargetLowering::performClassCombine(SDNode *N, 9923 DAGCombinerInfo &DCI) const { 9924 SelectionDAG &DAG = DCI.DAG; 9925 SDValue Mask = N->getOperand(1); 9926 9927 // fp_class x, 0 -> false 9928 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { 9929 if (CMask->isZero()) 9930 return DAG.getConstant(0, SDLoc(N), MVT::i1); 9931 } 9932 9933 if (N->getOperand(0).isUndef()) 9934 return DAG.getUNDEF(MVT::i1); 9935 9936 return SDValue(); 9937 } 9938 9939 SDValue SITargetLowering::performRcpCombine(SDNode *N, 9940 DAGCombinerInfo &DCI) const { 9941 EVT VT = N->getValueType(0); 9942 SDValue N0 = N->getOperand(0); 9943 9944 if (N0.isUndef()) 9945 return N0; 9946 9947 if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP || 9948 N0.getOpcode() == ISD::SINT_TO_FP)) { 9949 return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0, 9950 N->getFlags()); 9951 } 9952 9953 if ((VT == MVT::f32 || VT == MVT::f16) && N0.getOpcode() == ISD::FSQRT) { 9954 return DCI.DAG.getNode(AMDGPUISD::RSQ, SDLoc(N), VT, 9955 N0.getOperand(0), N->getFlags()); 9956 } 9957 9958 return AMDGPUTargetLowering::performRcpCombine(N, DCI); 9959 } 9960 9961 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op, 9962 unsigned MaxDepth) const { 9963 unsigned Opcode = Op.getOpcode(); 9964 if (Opcode == ISD::FCANONICALIZE) 9965 return true; 9966 9967 if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) { 9968 auto F = CFP->getValueAPF(); 9969 if (F.isNaN() && F.isSignaling()) 9970 return false; 9971 return !F.isDenormal() || denormalsEnabledForType(DAG, Op.getValueType()); 9972 } 9973 9974 // If source is a result of another standard FP operation it is already in 9975 // canonical form. 9976 if (MaxDepth == 0) 9977 return false; 9978 9979 switch (Opcode) { 9980 // These will flush denorms if required. 9981 case ISD::FADD: 9982 case ISD::FSUB: 9983 case ISD::FMUL: 9984 case ISD::FCEIL: 9985 case ISD::FFLOOR: 9986 case ISD::FMA: 9987 case ISD::FMAD: 9988 case ISD::FSQRT: 9989 case ISD::FDIV: 9990 case ISD::FREM: 9991 case ISD::FP_ROUND: 9992 case ISD::FP_EXTEND: 9993 case AMDGPUISD::FMUL_LEGACY: 9994 case AMDGPUISD::FMAD_FTZ: 9995 case AMDGPUISD::RCP: 9996 case AMDGPUISD::RSQ: 9997 case AMDGPUISD::RSQ_CLAMP: 9998 case AMDGPUISD::RCP_LEGACY: 9999 case AMDGPUISD::RCP_IFLAG: 10000 case AMDGPUISD::DIV_SCALE: 10001 case AMDGPUISD::DIV_FMAS: 10002 case AMDGPUISD::DIV_FIXUP: 10003 case AMDGPUISD::FRACT: 10004 case AMDGPUISD::LDEXP: 10005 case AMDGPUISD::CVT_PKRTZ_F16_F32: 10006 case AMDGPUISD::CVT_F32_UBYTE0: 10007 case AMDGPUISD::CVT_F32_UBYTE1: 10008 case AMDGPUISD::CVT_F32_UBYTE2: 10009 case AMDGPUISD::CVT_F32_UBYTE3: 10010 return true; 10011 10012 // It can/will be lowered or combined as a bit operation. 10013 // Need to check their input recursively to handle. 
10014 case ISD::FNEG:
10015 case ISD::FABS:
10016 case ISD::FCOPYSIGN:
10017 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
10018
10019 case ISD::FSIN:
10020 case ISD::FCOS:
10021 case ISD::FSINCOS:
10022 return Op.getValueType().getScalarType() != MVT::f16;
10023
10024 case ISD::FMINNUM:
10025 case ISD::FMAXNUM:
10026 case ISD::FMINNUM_IEEE:
10027 case ISD::FMAXNUM_IEEE:
10028 case AMDGPUISD::CLAMP:
10029 case AMDGPUISD::FMED3:
10030 case AMDGPUISD::FMAX3:
10031 case AMDGPUISD::FMIN3: {
10032 // FIXME: Shouldn't treat the generic operations differently based on these.
10033 // However, we aren't really required to flush the result from
10034 // minnum/maxnum.
10035
10036 // snans will be quieted, so we only need to worry about denormals.
10037 if (Subtarget->supportsMinMaxDenormModes() ||
10038 denormalsEnabledForType(DAG, Op.getValueType()))
10039 return true;
10040
10041 // Flushing may be required.
10042 // On pre-GFX9 targets V_MIN_F32 and others do not flush denorms; for such
10043 // targets we need to check their inputs recursively.
10044
10045 // FIXME: Does this apply with clamp? It's implemented with max.
10046 for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
10047 if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
10048 return false;
10049 }
10050
10051 return true;
10052 }
10053 case ISD::SELECT: {
10054 return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
10055 isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
10056 }
10057 case ISD::BUILD_VECTOR: {
10058 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
10059 SDValue SrcOp = Op.getOperand(i);
10060 if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
10061 return false;
10062 }
10063
10064 return true;
10065 }
10066 case ISD::EXTRACT_VECTOR_ELT:
10067 case ISD::EXTRACT_SUBVECTOR: {
10068 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
10069 }
10070 case ISD::INSERT_VECTOR_ELT: {
10071 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
10072 isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
10073 }
10074 case ISD::UNDEF:
10075 // Could be anything.
10076 return false; 10077 10078 case ISD::BITCAST: 10079 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); 10080 case ISD::TRUNCATE: { 10081 // Hack round the mess we make when legalizing extract_vector_elt 10082 if (Op.getValueType() == MVT::i16) { 10083 SDValue TruncSrc = Op.getOperand(0); 10084 if (TruncSrc.getValueType() == MVT::i32 && 10085 TruncSrc.getOpcode() == ISD::BITCAST && 10086 TruncSrc.getOperand(0).getValueType() == MVT::v2f16) { 10087 return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1); 10088 } 10089 } 10090 return false; 10091 } 10092 case ISD::INTRINSIC_WO_CHAIN: { 10093 unsigned IntrinsicID 10094 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10095 // TODO: Handle more intrinsics 10096 switch (IntrinsicID) { 10097 case Intrinsic::amdgcn_cvt_pkrtz: 10098 case Intrinsic::amdgcn_cubeid: 10099 case Intrinsic::amdgcn_frexp_mant: 10100 case Intrinsic::amdgcn_fdot2: 10101 case Intrinsic::amdgcn_rcp: 10102 case Intrinsic::amdgcn_rsq: 10103 case Intrinsic::amdgcn_rsq_clamp: 10104 case Intrinsic::amdgcn_rcp_legacy: 10105 case Intrinsic::amdgcn_rsq_legacy: 10106 case Intrinsic::amdgcn_trig_preop: 10107 return true; 10108 default: 10109 break; 10110 } 10111 10112 LLVM_FALLTHROUGH; 10113 } 10114 default: 10115 return denormalsEnabledForType(DAG, Op.getValueType()) && 10116 DAG.isKnownNeverSNaN(Op); 10117 } 10118 10119 llvm_unreachable("invalid operation"); 10120 } 10121 10122 bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF, 10123 unsigned MaxDepth) const { 10124 MachineRegisterInfo &MRI = MF.getRegInfo(); 10125 MachineInstr *MI = MRI.getVRegDef(Reg); 10126 unsigned Opcode = MI->getOpcode(); 10127 10128 if (Opcode == AMDGPU::G_FCANONICALIZE) 10129 return true; 10130 10131 Optional<FPValueAndVReg> FCR; 10132 // Constant splat (can be padded with undef) or scalar constant. 10133 if (mi_match(Reg, MRI, MIPatternMatch::m_GFCstOrSplat(FCR))) { 10134 if (FCR->Value.isSignaling()) 10135 return false; 10136 return !FCR->Value.isDenormal() || 10137 denormalsEnabledForType(MRI.getType(FCR->VReg), MF); 10138 } 10139 10140 if (MaxDepth == 0) 10141 return false; 10142 10143 switch (Opcode) { 10144 case AMDGPU::G_FMINNUM_IEEE: 10145 case AMDGPU::G_FMAXNUM_IEEE: { 10146 if (Subtarget->supportsMinMaxDenormModes() || 10147 denormalsEnabledForType(MRI.getType(Reg), MF)) 10148 return true; 10149 for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) 10150 if (!isCanonicalized(MO.getReg(), MF, MaxDepth - 1)) 10151 return false; 10152 return true; 10153 } 10154 default: 10155 return denormalsEnabledForType(MRI.getType(Reg), MF) && 10156 isKnownNeverSNaN(Reg, MRI); 10157 } 10158 10159 llvm_unreachable("invalid operation"); 10160 } 10161 10162 // Constant fold canonicalize. 10163 SDValue SITargetLowering::getCanonicalConstantFP( 10164 SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const { 10165 // Flush denormals to 0 if not enabled. 10166 if (C.isDenormal() && !denormalsEnabledForType(DAG, VT)) 10167 return DAG.getConstantFP(0.0, SL, VT); 10168 10169 if (C.isNaN()) { 10170 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); 10171 if (C.isSignaling()) { 10172 // Quiet a signaling NaN. 10173 // FIXME: Is this supposed to preserve payload bits? 10174 return DAG.getConstantFP(CanonicalQNaN, SL, VT); 10175 } 10176 10177 // Make sure it is the canonical NaN bitpattern. 10178 // 10179 // TODO: Can we use -1 as the canonical NaN value since it's an inline 10180 // immediate? 
10181 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) 10182 return DAG.getConstantFP(CanonicalQNaN, SL, VT); 10183 } 10184 10185 // Already canonical. 10186 return DAG.getConstantFP(C, SL, VT); 10187 } 10188 10189 static bool vectorEltWillFoldAway(SDValue Op) { 10190 return Op.isUndef() || isa<ConstantFPSDNode>(Op); 10191 } 10192 10193 SDValue SITargetLowering::performFCanonicalizeCombine( 10194 SDNode *N, 10195 DAGCombinerInfo &DCI) const { 10196 SelectionDAG &DAG = DCI.DAG; 10197 SDValue N0 = N->getOperand(0); 10198 EVT VT = N->getValueType(0); 10199 10200 // fcanonicalize undef -> qnan 10201 if (N0.isUndef()) { 10202 APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT)); 10203 return DAG.getConstantFP(QNaN, SDLoc(N), VT); 10204 } 10205 10206 if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) { 10207 EVT VT = N->getValueType(0); 10208 return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF()); 10209 } 10210 10211 // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x), 10212 // (fcanonicalize k) 10213 // 10214 // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0 10215 10216 // TODO: This could be better with wider vectors that will be split to v2f16, 10217 // and to consider uses since there aren't that many packed operations. 10218 if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 && 10219 isTypeLegal(MVT::v2f16)) { 10220 SDLoc SL(N); 10221 SDValue NewElts[2]; 10222 SDValue Lo = N0.getOperand(0); 10223 SDValue Hi = N0.getOperand(1); 10224 EVT EltVT = Lo.getValueType(); 10225 10226 if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) { 10227 for (unsigned I = 0; I != 2; ++I) { 10228 SDValue Op = N0.getOperand(I); 10229 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { 10230 NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT, 10231 CFP->getValueAPF()); 10232 } else if (Op.isUndef()) { 10233 // Handled below based on what the other operand is. 10234 NewElts[I] = Op; 10235 } else { 10236 NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op); 10237 } 10238 } 10239 10240 // If one half is undef, and one is constant, prefer a splat vector rather 10241 // than the normal qNaN. If it's a register, prefer 0.0 since that's 10242 // cheaper to use and may be free with a packed operation. 10243 if (NewElts[0].isUndef()) { 10244 if (isa<ConstantFPSDNode>(NewElts[1])) 10245 NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ? 10246 NewElts[1]: DAG.getConstantFP(0.0f, SL, EltVT); 10247 } 10248 10249 if (NewElts[1].isUndef()) { 10250 NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ? 10251 NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT); 10252 } 10253 10254 return DAG.getBuildVector(VT, SL, NewElts); 10255 } 10256 } 10257 10258 unsigned SrcOpc = N0.getOpcode(); 10259 10260 // If it's free to do so, push canonicalizes further up the source, which may 10261 // find a canonical source. 10262 // 10263 // TODO: More opcodes. Note this is unsafe for the the _ieee minnum/maxnum for 10264 // sNaNs. 
10265 if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) { 10266 auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); 10267 if (CRHS && N0.hasOneUse()) { 10268 SDLoc SL(N); 10269 SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT, 10270 N0.getOperand(0)); 10271 SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF()); 10272 DCI.AddToWorklist(Canon0.getNode()); 10273 10274 return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1); 10275 } 10276 } 10277 10278 return isCanonicalized(DAG, N0) ? N0 : SDValue(); 10279 } 10280 10281 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { 10282 switch (Opc) { 10283 case ISD::FMAXNUM: 10284 case ISD::FMAXNUM_IEEE: 10285 return AMDGPUISD::FMAX3; 10286 case ISD::SMAX: 10287 return AMDGPUISD::SMAX3; 10288 case ISD::UMAX: 10289 return AMDGPUISD::UMAX3; 10290 case ISD::FMINNUM: 10291 case ISD::FMINNUM_IEEE: 10292 return AMDGPUISD::FMIN3; 10293 case ISD::SMIN: 10294 return AMDGPUISD::SMIN3; 10295 case ISD::UMIN: 10296 return AMDGPUISD::UMIN3; 10297 default: 10298 llvm_unreachable("Not a min/max opcode"); 10299 } 10300 } 10301 10302 SDValue SITargetLowering::performIntMed3ImmCombine( 10303 SelectionDAG &DAG, const SDLoc &SL, 10304 SDValue Op0, SDValue Op1, bool Signed) const { 10305 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); 10306 if (!K1) 10307 return SDValue(); 10308 10309 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); 10310 if (!K0) 10311 return SDValue(); 10312 10313 if (Signed) { 10314 if (K0->getAPIntValue().sge(K1->getAPIntValue())) 10315 return SDValue(); 10316 } else { 10317 if (K0->getAPIntValue().uge(K1->getAPIntValue())) 10318 return SDValue(); 10319 } 10320 10321 EVT VT = K0->getValueType(0); 10322 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; 10323 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { 10324 return DAG.getNode(Med3Opc, SL, VT, 10325 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); 10326 } 10327 10328 // If there isn't a 16-bit med3 operation, convert to 32-bit. 10329 if (VT == MVT::i16) { 10330 MVT NVT = MVT::i32; 10331 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 10332 10333 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); 10334 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); 10335 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); 10336 10337 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); 10338 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); 10339 } 10340 10341 return SDValue(); 10342 } 10343 10344 static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { 10345 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 10346 return C; 10347 10348 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) { 10349 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode()) 10350 return C; 10351 } 10352 10353 return nullptr; 10354 } 10355 10356 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, 10357 const SDLoc &SL, 10358 SDValue Op0, 10359 SDValue Op1) const { 10360 ConstantFPSDNode *K1 = getSplatConstantFP(Op1); 10361 if (!K1) 10362 return SDValue(); 10363 10364 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1)); 10365 if (!K0) 10366 return SDValue(); 10367 10368 // Ordered >= (although NaN inputs should have folded away by now). 
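// If K0 > K1 then min(max(x, K0), K1) always evaluates to K1, while
// med3(x, K0, K1) would not, so no combine is performed in that case.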
10369 if (K0->getValueAPF() > K1->getValueAPF()) 10370 return SDValue(); 10371 10372 const MachineFunction &MF = DAG.getMachineFunction(); 10373 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 10374 10375 // TODO: Check IEEE bit enabled? 10376 EVT VT = Op0.getValueType(); 10377 if (Info->getMode().DX10Clamp) { 10378 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the 10379 // hardware fmed3 behavior converting to a min. 10380 // FIXME: Should this be allowing -0.0? 10381 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) 10382 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); 10383 } 10384 10385 // med3 for f16 is only available on gfx9+, and not available for v2f16. 10386 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) { 10387 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a 10388 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would 10389 // then give the other result, which is different from med3 with a NaN 10390 // input. 10391 SDValue Var = Op0.getOperand(0); 10392 if (!DAG.isKnownNeverSNaN(Var)) 10393 return SDValue(); 10394 10395 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 10396 10397 if ((!K0->hasOneUse() || 10398 TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) && 10399 (!K1->hasOneUse() || 10400 TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) { 10401 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), 10402 Var, SDValue(K0, 0), SDValue(K1, 0)); 10403 } 10404 } 10405 10406 return SDValue(); 10407 } 10408 10409 SDValue SITargetLowering::performMinMaxCombine(SDNode *N, 10410 DAGCombinerInfo &DCI) const { 10411 SelectionDAG &DAG = DCI.DAG; 10412 10413 EVT VT = N->getValueType(0); 10414 unsigned Opc = N->getOpcode(); 10415 SDValue Op0 = N->getOperand(0); 10416 SDValue Op1 = N->getOperand(1); 10417 10418 // Only do this if the inner op has one use since this will just increases 10419 // register pressure for no benefit. 10420 10421 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && 10422 !VT.isVector() && 10423 (VT == MVT::i32 || VT == MVT::f32 || 10424 ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) { 10425 // max(max(a, b), c) -> max3(a, b, c) 10426 // min(min(a, b), c) -> min3(a, b, c) 10427 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { 10428 SDLoc DL(N); 10429 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 10430 DL, 10431 N->getValueType(0), 10432 Op0.getOperand(0), 10433 Op0.getOperand(1), 10434 Op1); 10435 } 10436 10437 // Try commuted. 
10438 // max(a, max(b, c)) -> max3(a, b, c) 10439 // min(a, min(b, c)) -> min3(a, b, c) 10440 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { 10441 SDLoc DL(N); 10442 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), 10443 DL, 10444 N->getValueType(0), 10445 Op0, 10446 Op1.getOperand(0), 10447 Op1.getOperand(1)); 10448 } 10449 } 10450 10451 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) 10452 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { 10453 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) 10454 return Med3; 10455 } 10456 10457 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { 10458 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) 10459 return Med3; 10460 } 10461 10462 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) 10463 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || 10464 (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) || 10465 (Opc == AMDGPUISD::FMIN_LEGACY && 10466 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && 10467 (VT == MVT::f32 || VT == MVT::f64 || 10468 (VT == MVT::f16 && Subtarget->has16BitInsts()) || 10469 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) && 10470 Op0.hasOneUse()) { 10471 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) 10472 return Res; 10473 } 10474 10475 return SDValue(); 10476 } 10477 10478 static bool isClampZeroToOne(SDValue A, SDValue B) { 10479 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) { 10480 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) { 10481 // FIXME: Should this be allowing -0.0? 10482 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) || 10483 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0)); 10484 } 10485 } 10486 10487 return false; 10488 } 10489 10490 // FIXME: Should only worry about snans for version with chain. 10491 SDValue SITargetLowering::performFMed3Combine(SDNode *N, 10492 DAGCombinerInfo &DCI) const { 10493 EVT VT = N->getValueType(0); 10494 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and 10495 // NaNs. With a NaN input, the order of the operands may change the result. 10496 10497 SelectionDAG &DAG = DCI.DAG; 10498 SDLoc SL(N); 10499 10500 SDValue Src0 = N->getOperand(0); 10501 SDValue Src1 = N->getOperand(1); 10502 SDValue Src2 = N->getOperand(2); 10503 10504 if (isClampZeroToOne(Src0, Src1)) { 10505 // const_a, const_b, x -> clamp is safe in all cases including signaling 10506 // nans. 10507 // FIXME: Should this be allowing -0.0? 10508 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2); 10509 } 10510 10511 const MachineFunction &MF = DAG.getMachineFunction(); 10512 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 10513 10514 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother 10515 // handling no dx10-clamp? 10516 if (Info->getMode().DX10Clamp) { 10517 // If NaNs is clamped to 0, we are free to reorder the inputs. 
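// The swaps below move constant operands toward Src1/Src2, so a call such as
// fmed3(0.0, x, 1.0) is reordered to (x, 0.0, 1.0) and caught by the
// isClampZeroToOne(Src1, Src2) check.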
10518 10519 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) 10520 std::swap(Src0, Src1); 10521 10522 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2)) 10523 std::swap(Src1, Src2); 10524 10525 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) 10526 std::swap(Src0, Src1); 10527 10528 if (isClampZeroToOne(Src1, Src2)) 10529 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0); 10530 } 10531 10532 return SDValue(); 10533 } 10534 10535 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N, 10536 DAGCombinerInfo &DCI) const { 10537 SDValue Src0 = N->getOperand(0); 10538 SDValue Src1 = N->getOperand(1); 10539 if (Src0.isUndef() && Src1.isUndef()) 10540 return DCI.DAG.getUNDEF(N->getValueType(0)); 10541 return SDValue(); 10542 } 10543 10544 // Check if EXTRACT_VECTOR_ELT/INSERT_VECTOR_ELT (<n x e>, var-idx) should be 10545 // expanded into a set of cmp/select instructions. 10546 bool SITargetLowering::shouldExpandVectorDynExt(unsigned EltSize, 10547 unsigned NumElem, 10548 bool IsDivergentIdx, 10549 const GCNSubtarget *Subtarget) { 10550 if (UseDivergentRegisterIndexing) 10551 return false; 10552 10553 unsigned VecSize = EltSize * NumElem; 10554 10555 // Sub-dword vectors of size 2 dword or less have better implementation. 10556 if (VecSize <= 64 && EltSize < 32) 10557 return false; 10558 10559 // Always expand the rest of sub-dword instructions, otherwise it will be 10560 // lowered via memory. 10561 if (EltSize < 32) 10562 return true; 10563 10564 // Always do this if var-idx is divergent, otherwise it will become a loop. 10565 if (IsDivergentIdx) 10566 return true; 10567 10568 // Large vectors would yield too many compares and v_cndmask_b32 instructions. 10569 unsigned NumInsts = NumElem /* Number of compares */ + 10570 ((EltSize + 31) / 32) * NumElem /* Number of cndmasks */; 10571 10572 // On some architectures (GFX9) movrel is not available and it's better 10573 // to expand. 10574 if (!Subtarget->hasMovrel()) 10575 return NumInsts <= 16; 10576 10577 // If movrel is available, use it instead of expanding for vector of 8 10578 // elements. 
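// For example, a v8f32 with a uniform variable index gives
// NumInsts = 8 + 8 = 16: with movrel available this returns false (16 <= 15
// fails) and the indirect addressing is kept, without movrel it returns true
// and the access is expanded.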
10579 return NumInsts <= 15; 10580 } 10581 10582 bool SITargetLowering::shouldExpandVectorDynExt(SDNode *N) const { 10583 SDValue Idx = N->getOperand(N->getNumOperands() - 1); 10584 if (isa<ConstantSDNode>(Idx)) 10585 return false; 10586 10587 SDValue Vec = N->getOperand(0); 10588 EVT VecVT = Vec.getValueType(); 10589 EVT EltVT = VecVT.getVectorElementType(); 10590 unsigned EltSize = EltVT.getSizeInBits(); 10591 unsigned NumElem = VecVT.getVectorNumElements(); 10592 10593 return SITargetLowering::shouldExpandVectorDynExt( 10594 EltSize, NumElem, Idx->isDivergent(), getSubtarget()); 10595 } 10596 10597 SDValue SITargetLowering::performExtractVectorEltCombine( 10598 SDNode *N, DAGCombinerInfo &DCI) const { 10599 SDValue Vec = N->getOperand(0); 10600 SelectionDAG &DAG = DCI.DAG; 10601 10602 EVT VecVT = Vec.getValueType(); 10603 EVT EltVT = VecVT.getVectorElementType(); 10604 10605 if ((Vec.getOpcode() == ISD::FNEG || 10606 Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) { 10607 SDLoc SL(N); 10608 EVT EltVT = N->getValueType(0); 10609 SDValue Idx = N->getOperand(1); 10610 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 10611 Vec.getOperand(0), Idx); 10612 return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt); 10613 } 10614 10615 // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx) 10616 // => 10617 // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx) 10618 // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx) 10619 // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt 10620 if (Vec.hasOneUse() && DCI.isBeforeLegalize()) { 10621 SDLoc SL(N); 10622 EVT EltVT = N->getValueType(0); 10623 SDValue Idx = N->getOperand(1); 10624 unsigned Opc = Vec.getOpcode(); 10625 10626 switch(Opc) { 10627 default: 10628 break; 10629 // TODO: Support other binary operations. 10630 case ISD::FADD: 10631 case ISD::FSUB: 10632 case ISD::FMUL: 10633 case ISD::ADD: 10634 case ISD::UMIN: 10635 case ISD::UMAX: 10636 case ISD::SMIN: 10637 case ISD::SMAX: 10638 case ISD::FMAXNUM: 10639 case ISD::FMINNUM: 10640 case ISD::FMAXNUM_IEEE: 10641 case ISD::FMINNUM_IEEE: { 10642 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 10643 Vec.getOperand(0), Idx); 10644 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 10645 Vec.getOperand(1), Idx); 10646 10647 DCI.AddToWorklist(Elt0.getNode()); 10648 DCI.AddToWorklist(Elt1.getNode()); 10649 return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags()); 10650 } 10651 } 10652 } 10653 10654 unsigned VecSize = VecVT.getSizeInBits(); 10655 unsigned EltSize = EltVT.getSizeInBits(); 10656 10657 // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx) 10658 if (shouldExpandVectorDynExt(N)) { 10659 SDLoc SL(N); 10660 SDValue Idx = N->getOperand(1); 10661 SDValue V; 10662 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { 10663 SDValue IC = DAG.getVectorIdxConstant(I, SL); 10664 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); 10665 if (I == 0) 10666 V = Elt; 10667 else 10668 V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ); 10669 } 10670 return V; 10671 } 10672 10673 if (!DCI.isBeforeLegalize()) 10674 return SDValue(); 10675 10676 // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit 10677 // elements. This exposes more load reduction opportunities by replacing 10678 // multiple small extract_vector_elements with a single 32-bit extract. 
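// For example, extracting element 5 of a loaded v8i8 gives BitIndex = 40,
// EltIdx = 1 and LeftoverBitIdx = 8: the vector is bitcast to v2i32,
// element 1 is extracted, shifted right by 8 and truncated back to i8.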
10679 auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1)); 10680 if (isa<MemSDNode>(Vec) && 10681 EltSize <= 16 && 10682 EltVT.isByteSized() && 10683 VecSize > 32 && 10684 VecSize % 32 == 0 && 10685 Idx) { 10686 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT); 10687 10688 unsigned BitIndex = Idx->getZExtValue() * EltSize; 10689 unsigned EltIdx = BitIndex / 32; 10690 unsigned LeftoverBitIdx = BitIndex % 32; 10691 SDLoc SL(N); 10692 10693 SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec); 10694 DCI.AddToWorklist(Cast.getNode()); 10695 10696 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast, 10697 DAG.getConstant(EltIdx, SL, MVT::i32)); 10698 DCI.AddToWorklist(Elt.getNode()); 10699 SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt, 10700 DAG.getConstant(LeftoverBitIdx, SL, MVT::i32)); 10701 DCI.AddToWorklist(Srl.getNode()); 10702 10703 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl); 10704 DCI.AddToWorklist(Trunc.getNode()); 10705 return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc); 10706 } 10707 10708 return SDValue(); 10709 } 10710 10711 SDValue 10712 SITargetLowering::performInsertVectorEltCombine(SDNode *N, 10713 DAGCombinerInfo &DCI) const { 10714 SDValue Vec = N->getOperand(0); 10715 SDValue Idx = N->getOperand(2); 10716 EVT VecVT = Vec.getValueType(); 10717 EVT EltVT = VecVT.getVectorElementType(); 10718 10719 // INSERT_VECTOR_ELT (<n x e>, var-idx) 10720 // => BUILD_VECTOR n x select (e, const-idx) 10721 if (!shouldExpandVectorDynExt(N)) 10722 return SDValue(); 10723 10724 SelectionDAG &DAG = DCI.DAG; 10725 SDLoc SL(N); 10726 SDValue Ins = N->getOperand(1); 10727 EVT IdxVT = Idx.getValueType(); 10728 10729 SmallVector<SDValue, 16> Ops; 10730 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { 10731 SDValue IC = DAG.getConstant(I, SL, IdxVT); 10732 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); 10733 SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ); 10734 Ops.push_back(V); 10735 } 10736 10737 return DAG.getBuildVector(VecVT, SL, Ops); 10738 } 10739 10740 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, 10741 const SDNode *N0, 10742 const SDNode *N1) const { 10743 EVT VT = N0->getValueType(0); 10744 10745 // Only do this if we are not trying to support denormals. v_mad_f32 does not 10746 // support denormals ever. 
10747 if (((VT == MVT::f32 && !hasFP32Denormals(DAG.getMachineFunction())) || 10748 (VT == MVT::f16 && !hasFP64FP16Denormals(DAG.getMachineFunction()) && 10749 getSubtarget()->hasMadF16())) && 10750 isOperationLegal(ISD::FMAD, VT)) 10751 return ISD::FMAD; 10752 10753 const TargetOptions &Options = DAG.getTarget().Options; 10754 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || 10755 (N0->getFlags().hasAllowContract() && 10756 N1->getFlags().hasAllowContract())) && 10757 isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) { 10758 return ISD::FMA; 10759 } 10760 10761 return 0; 10762 } 10763 10764 // For a reassociatable opcode perform: 10765 // op x, (op y, z) -> op (op x, z), y, if x and z are uniform 10766 SDValue SITargetLowering::reassociateScalarOps(SDNode *N, 10767 SelectionDAG &DAG) const { 10768 EVT VT = N->getValueType(0); 10769 if (VT != MVT::i32 && VT != MVT::i64) 10770 return SDValue(); 10771 10772 if (DAG.isBaseWithConstantOffset(SDValue(N, 0))) 10773 return SDValue(); 10774 10775 unsigned Opc = N->getOpcode(); 10776 SDValue Op0 = N->getOperand(0); 10777 SDValue Op1 = N->getOperand(1); 10778 10779 if (!(Op0->isDivergent() ^ Op1->isDivergent())) 10780 return SDValue(); 10781 10782 if (Op0->isDivergent()) 10783 std::swap(Op0, Op1); 10784 10785 if (Op1.getOpcode() != Opc || !Op1.hasOneUse()) 10786 return SDValue(); 10787 10788 SDValue Op2 = Op1.getOperand(1); 10789 Op1 = Op1.getOperand(0); 10790 if (!(Op1->isDivergent() ^ Op2->isDivergent())) 10791 return SDValue(); 10792 10793 if (Op1->isDivergent()) 10794 std::swap(Op1, Op2); 10795 10796 SDLoc SL(N); 10797 SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1); 10798 return DAG.getNode(Opc, SL, VT, Add1, Op2); 10799 } 10800 10801 static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, 10802 EVT VT, 10803 SDValue N0, SDValue N1, SDValue N2, 10804 bool Signed) { 10805 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32; 10806 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1); 10807 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2); 10808 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad); 10809 } 10810 10811 // Fold (add (mul x, y), z) --> (mad_[iu]64_[iu]32 x, y, z) plus high 10812 // multiplies, if any. 10813 // 10814 // Full 64-bit multiplies that feed into an addition are lowered here instead 10815 // of using the generic expansion. The generic expansion ends up with 10816 // a tree of ADD nodes that prevents us from using the "add" part of the 10817 // MAD instruction. The expansion produced here results in a chain of ADDs 10818 // instead of a tree. 10819 SDValue SITargetLowering::tryFoldToMad64_32(SDNode *N, 10820 DAGCombinerInfo &DCI) const { 10821 assert(N->getOpcode() == ISD::ADD); 10822 10823 SelectionDAG &DAG = DCI.DAG; 10824 EVT VT = N->getValueType(0); 10825 SDLoc SL(N); 10826 SDValue LHS = N->getOperand(0); 10827 SDValue RHS = N->getOperand(1); 10828 10829 if (VT.isVector()) 10830 return SDValue(); 10831 10832 // S_MUL_HI_[IU]32 was added in gfx9, which allows us to keep the overall 10833 // result in scalar registers for uniform values. 
10834 if (!N->isDivergent() && Subtarget->hasSMulHi()) 10835 return SDValue(); 10836 10837 unsigned NumBits = VT.getScalarSizeInBits(); 10838 if (NumBits <= 32 || NumBits > 64) 10839 return SDValue(); 10840 10841 if (LHS.getOpcode() != ISD::MUL) { 10842 assert(RHS.getOpcode() == ISD::MUL); 10843 std::swap(LHS, RHS); 10844 } 10845 10846 // Avoid the fold if it would unduly increase the number of multiplies due to 10847 // multiple uses, except on hardware with full-rate multiply-add (which is 10848 // part of full-rate 64-bit ops). 10849 if (!Subtarget->hasFullRate64Ops()) { 10850 unsigned NumUsers = 0; 10851 for (SDNode *Use : LHS->uses()) { 10852 // There is a use that does not feed into addition, so the multiply can't 10853 // be removed. We prefer MUL + ADD + ADDC over MAD + MUL. 10854 if (Use->getOpcode() != ISD::ADD) 10855 return SDValue(); 10856 10857 // We prefer 2xMAD over MUL + 2xADD + 2xADDC (code density), and prefer 10858 // MUL + 3xADD + 3xADDC over 3xMAD. 10859 ++NumUsers; 10860 if (NumUsers >= 3) 10861 return SDValue(); 10862 } 10863 } 10864 10865 SDValue MulLHS = LHS.getOperand(0); 10866 SDValue MulRHS = LHS.getOperand(1); 10867 SDValue AddRHS = RHS; 10868 10869 // Always check whether operands are small unsigned values, since that 10870 // knowledge is useful in more cases. Check for small signed values only if 10871 // doing so can unlock a shorter code sequence. 10872 bool MulLHSUnsigned32 = numBitsUnsigned(MulLHS, DAG) <= 32; 10873 bool MulRHSUnsigned32 = numBitsUnsigned(MulRHS, DAG) <= 32; 10874 10875 bool MulSignedLo = false; 10876 if (!MulLHSUnsigned32 || !MulRHSUnsigned32) { 10877 MulSignedLo = numBitsSigned(MulLHS, DAG) <= 32 && 10878 numBitsSigned(MulRHS, DAG) <= 32; 10879 } 10880 10881 // The operands and final result all have the same number of bits. If 10882 // operands need to be extended, they can be extended with garbage. The 10883 // resulting garbage in the high bits of the mad_[iu]64_[iu]32 result is 10884 // truncated away in the end. 10885 if (VT != MVT::i64) { 10886 MulLHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i64, MulLHS); 10887 MulRHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i64, MulRHS); 10888 AddRHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i64, AddRHS); 10889 } 10890 10891 // The basic code generated is conceptually straightforward. Pseudo code: 10892 // 10893 // accum = mad_64_32 lhs.lo, rhs.lo, accum 10894 // accum.hi = add (mul lhs.hi, rhs.lo), accum.hi 10895 // accum.hi = add (mul lhs.lo, rhs.hi), accum.hi 10896 // 10897 // The second and third lines are optional, depending on whether the factors 10898 // are {sign,zero}-extended or not. 10899 // 10900 // The actual DAG is noisier than the pseudo code, but only due to 10901 // instructions that disassemble values into low and high parts, and 10902 // assemble the final result. 
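// As a small sanity check: for lhs = 2^32 + 3, rhs = 5 and accum = 7, the
// first pseudo-code line computes 3 * 5 + 7 = 22 as a 64-bit value, the
// second adds lhs.hi * rhs.lo = 5 into accum.hi, giving 5 * 2^32 + 22, which
// equals (2^32 + 3) * 5 + 7; the third line is skipped because rhs fits in
// 32 bits.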
10903 SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 10904 SDValue One = DAG.getConstant(1, SL, MVT::i32); 10905 10906 auto MulLHSLo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulLHS); 10907 auto MulRHSLo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulRHS); 10908 SDValue Accum = 10909 getMad64_32(DAG, SL, MVT::i64, MulLHSLo, MulRHSLo, AddRHS, MulSignedLo); 10910 10911 if (!MulSignedLo && (!MulLHSUnsigned32 || !MulRHSUnsigned32)) { 10912 auto AccumLo = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, Accum, Zero); 10913 auto AccumHi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, Accum, One); 10914 10915 if (!MulLHSUnsigned32) { 10916 auto MulLHSHi = 10917 DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, MulLHS, One); 10918 SDValue MulHi = DAG.getNode(ISD::MUL, SL, MVT::i32, MulLHSHi, MulRHSLo); 10919 AccumHi = DAG.getNode(ISD::ADD, SL, MVT::i32, MulHi, AccumHi); 10920 } 10921 10922 if (!MulRHSUnsigned32) { 10923 auto MulRHSHi = 10924 DAG.getNode(ISD::EXTRACT_ELEMENT, SL, MVT::i32, MulRHS, One); 10925 SDValue MulHi = DAG.getNode(ISD::MUL, SL, MVT::i32, MulLHSLo, MulRHSHi); 10926 AccumHi = DAG.getNode(ISD::ADD, SL, MVT::i32, MulHi, AccumHi); 10927 } 10928 10929 Accum = DAG.getBuildVector(MVT::v2i32, SL, {AccumLo, AccumHi}); 10930 Accum = DAG.getBitcast(MVT::i64, Accum); 10931 } 10932 10933 if (VT != MVT::i64) 10934 Accum = DAG.getNode(ISD::TRUNCATE, SL, VT, Accum); 10935 return Accum; 10936 } 10937 10938 SDValue SITargetLowering::performAddCombine(SDNode *N, 10939 DAGCombinerInfo &DCI) const { 10940 SelectionDAG &DAG = DCI.DAG; 10941 EVT VT = N->getValueType(0); 10942 SDLoc SL(N); 10943 SDValue LHS = N->getOperand(0); 10944 SDValue RHS = N->getOperand(1); 10945 10946 if (LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) { 10947 if (Subtarget->hasMad64_32()) { 10948 if (SDValue Folded = tryFoldToMad64_32(N, DCI)) 10949 return Folded; 10950 } 10951 10952 return SDValue(); 10953 } 10954 10955 if (SDValue V = reassociateScalarOps(N, DAG)) { 10956 return V; 10957 } 10958 10959 if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG()) 10960 return SDValue(); 10961 10962 // add x, zext (setcc) => addcarry x, 0, setcc 10963 // add x, sext (setcc) => subcarry x, 0, setcc 10964 unsigned Opc = LHS.getOpcode(); 10965 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND || 10966 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY) 10967 std::swap(RHS, LHS); 10968 10969 Opc = RHS.getOpcode(); 10970 switch (Opc) { 10971 default: break; 10972 case ISD::ZERO_EXTEND: 10973 case ISD::SIGN_EXTEND: 10974 case ISD::ANY_EXTEND: { 10975 auto Cond = RHS.getOperand(0); 10976 // If this won't be a real VOPC output, we would still need to insert an 10977 // extra instruction anyway. 10978 if (!isBoolSGPR(Cond)) 10979 break; 10980 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); 10981 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; 10982 Opc = (Opc == ISD::SIGN_EXTEND) ? 
ISD::SUBCARRY : ISD::ADDCARRY; 10983 return DAG.getNode(Opc, SL, VTList, Args); 10984 } 10985 case ISD::ADDCARRY: { 10986 // add x, (addcarry y, 0, cc) => addcarry x, y, cc 10987 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 10988 if (!C || C->getZExtValue() != 0) break; 10989 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) }; 10990 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args); 10991 } 10992 } 10993 return SDValue(); 10994 } 10995 10996 SDValue SITargetLowering::performSubCombine(SDNode *N, 10997 DAGCombinerInfo &DCI) const { 10998 SelectionDAG &DAG = DCI.DAG; 10999 EVT VT = N->getValueType(0); 11000 11001 if (VT != MVT::i32) 11002 return SDValue(); 11003 11004 SDLoc SL(N); 11005 SDValue LHS = N->getOperand(0); 11006 SDValue RHS = N->getOperand(1); 11007 11008 // sub x, zext (setcc) => subcarry x, 0, setcc 11009 // sub x, sext (setcc) => addcarry x, 0, setcc 11010 unsigned Opc = RHS.getOpcode(); 11011 switch (Opc) { 11012 default: break; 11013 case ISD::ZERO_EXTEND: 11014 case ISD::SIGN_EXTEND: 11015 case ISD::ANY_EXTEND: { 11016 auto Cond = RHS.getOperand(0); 11017 // If this won't be a real VOPC output, we would still need to insert an 11018 // extra instruction anyway. 11019 if (!isBoolSGPR(Cond)) 11020 break; 11021 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); 11022 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; 11023 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::ADDCARRY : ISD::SUBCARRY; 11024 return DAG.getNode(Opc, SL, VTList, Args); 11025 } 11026 } 11027 11028 if (LHS.getOpcode() == ISD::SUBCARRY) { 11029 // sub (subcarry x, 0, cc), y => subcarry x, y, cc 11030 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 11031 if (!C || !C->isZero()) 11032 return SDValue(); 11033 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) }; 11034 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args); 11035 } 11036 return SDValue(); 11037 } 11038 11039 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N, 11040 DAGCombinerInfo &DCI) const { 11041 11042 if (N->getValueType(0) != MVT::i32) 11043 return SDValue(); 11044 11045 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 11046 if (!C || C->getZExtValue() != 0) 11047 return SDValue(); 11048 11049 SelectionDAG &DAG = DCI.DAG; 11050 SDValue LHS = N->getOperand(0); 11051 11052 // addcarry (add x, y), 0, cc => addcarry x, y, cc 11053 // subcarry (sub x, y), 0, cc => subcarry x, y, cc 11054 unsigned LHSOpc = LHS.getOpcode(); 11055 unsigned Opc = N->getOpcode(); 11056 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) || 11057 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) { 11058 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) }; 11059 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args); 11060 } 11061 return SDValue(); 11062 } 11063 11064 SDValue SITargetLowering::performFAddCombine(SDNode *N, 11065 DAGCombinerInfo &DCI) const { 11066 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 11067 return SDValue(); 11068 11069 SelectionDAG &DAG = DCI.DAG; 11070 EVT VT = N->getValueType(0); 11071 11072 SDLoc SL(N); 11073 SDValue LHS = N->getOperand(0); 11074 SDValue RHS = N->getOperand(1); 11075 11076 // These should really be instruction patterns, but writing patterns with 11077 // source modifiers is a pain. 
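  // Note that rewriting a + a as 2.0 * a is exact (no rounding is involved),
  // so the only question below is whether forming a fused multiply-add is
  // allowed, which getFusedOpcode decides from the subtarget and the node's
  // FP flags.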
11078 11079 // fadd (fadd (a, a), b) -> mad 2.0, a, b 11080 if (LHS.getOpcode() == ISD::FADD) { 11081 SDValue A = LHS.getOperand(0); 11082 if (A == LHS.getOperand(1)) { 11083 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 11084 if (FusedOp != 0) { 11085 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 11086 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); 11087 } 11088 } 11089 } 11090 11091 // fadd (b, fadd (a, a)) -> mad 2.0, a, b 11092 if (RHS.getOpcode() == ISD::FADD) { 11093 SDValue A = RHS.getOperand(0); 11094 if (A == RHS.getOperand(1)) { 11095 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 11096 if (FusedOp != 0) { 11097 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 11098 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); 11099 } 11100 } 11101 } 11102 11103 return SDValue(); 11104 } 11105 11106 SDValue SITargetLowering::performFSubCombine(SDNode *N, 11107 DAGCombinerInfo &DCI) const { 11108 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 11109 return SDValue(); 11110 11111 SelectionDAG &DAG = DCI.DAG; 11112 SDLoc SL(N); 11113 EVT VT = N->getValueType(0); 11114 assert(!VT.isVector()); 11115 11116 // Try to get the fneg to fold into the source modifier. This undoes generic 11117 // DAG combines and folds them into the mad. 11118 // 11119 // Only do this if we are not trying to support denormals. v_mad_f32 does 11120 // not support denormals ever. 11121 SDValue LHS = N->getOperand(0); 11122 SDValue RHS = N->getOperand(1); 11123 if (LHS.getOpcode() == ISD::FADD) { 11124 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) 11125 SDValue A = LHS.getOperand(0); 11126 if (A == LHS.getOperand(1)) { 11127 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); 11128 if (FusedOp != 0){ 11129 const SDValue Two = DAG.getConstantFP(2.0, SL, VT); 11130 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 11131 11132 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); 11133 } 11134 } 11135 } 11136 11137 if (RHS.getOpcode() == ISD::FADD) { 11138 // (fsub c, (fadd a, a)) -> mad -2.0, a, c 11139 11140 SDValue A = RHS.getOperand(0); 11141 if (A == RHS.getOperand(1)) { 11142 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); 11143 if (FusedOp != 0){ 11144 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); 11145 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); 11146 } 11147 } 11148 } 11149 11150 return SDValue(); 11151 } 11152 11153 SDValue SITargetLowering::performFMACombine(SDNode *N, 11154 DAGCombinerInfo &DCI) const { 11155 SelectionDAG &DAG = DCI.DAG; 11156 EVT VT = N->getValueType(0); 11157 SDLoc SL(N); 11158 11159 if (!Subtarget->hasDot7Insts() || VT != MVT::f32) 11160 return SDValue(); 11161 11162 // FMA((F32)S0.x, (F32)S1. x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) -> 11163 // FDOT2((V2F16)S0, (V2F16)S1, (F32)z)) 11164 SDValue Op1 = N->getOperand(0); 11165 SDValue Op2 = N->getOperand(1); 11166 SDValue FMA = N->getOperand(2); 11167 11168 if (FMA.getOpcode() != ISD::FMA || 11169 Op1.getOpcode() != ISD::FP_EXTEND || 11170 Op2.getOpcode() != ISD::FP_EXTEND) 11171 return SDValue(); 11172 11173 // fdot2_f32_f16 always flushes fp32 denormal operand and output to zero, 11174 // regardless of the denorm mode setting. Therefore, 11175 // unsafe-fp-math/fp-contract is sufficient to allow generating fdot2. 
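  // Conceptually, with two v2f16 vectors S0, S1 and lane indices i != j, the
  // pattern being matched is:
  //
  //   fma(fpext(S0[i]), fpext(S1[i]), fma(fpext(S0[j]), fpext(S1[j]), z))
  //     --> fdot2(S0, S1, z)
  //
  // The checks below make sure both multiplies read matching lanes of the same
  // pair of vectors before the dot product is formed.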
11176 const TargetOptions &Options = DAG.getTarget().Options; 11177 if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || 11178 (N->getFlags().hasAllowContract() && 11179 FMA->getFlags().hasAllowContract())) { 11180 Op1 = Op1.getOperand(0); 11181 Op2 = Op2.getOperand(0); 11182 if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 11183 Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 11184 return SDValue(); 11185 11186 SDValue Vec1 = Op1.getOperand(0); 11187 SDValue Idx1 = Op1.getOperand(1); 11188 SDValue Vec2 = Op2.getOperand(0); 11189 11190 SDValue FMAOp1 = FMA.getOperand(0); 11191 SDValue FMAOp2 = FMA.getOperand(1); 11192 SDValue FMAAcc = FMA.getOperand(2); 11193 11194 if (FMAOp1.getOpcode() != ISD::FP_EXTEND || 11195 FMAOp2.getOpcode() != ISD::FP_EXTEND) 11196 return SDValue(); 11197 11198 FMAOp1 = FMAOp1.getOperand(0); 11199 FMAOp2 = FMAOp2.getOperand(0); 11200 if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 11201 FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 11202 return SDValue(); 11203 11204 SDValue Vec3 = FMAOp1.getOperand(0); 11205 SDValue Vec4 = FMAOp2.getOperand(0); 11206 SDValue Idx2 = FMAOp1.getOperand(1); 11207 11208 if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) || 11209 // Idx1 and Idx2 cannot be the same. 11210 Idx1 == Idx2) 11211 return SDValue(); 11212 11213 if (Vec1 == Vec2 || Vec3 == Vec4) 11214 return SDValue(); 11215 11216 if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16) 11217 return SDValue(); 11218 11219 if ((Vec1 == Vec3 && Vec2 == Vec4) || 11220 (Vec1 == Vec4 && Vec2 == Vec3)) { 11221 return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc, 11222 DAG.getTargetConstant(0, SL, MVT::i1)); 11223 } 11224 } 11225 return SDValue(); 11226 } 11227 11228 SDValue SITargetLowering::performSetCCCombine(SDNode *N, 11229 DAGCombinerInfo &DCI) const { 11230 SelectionDAG &DAG = DCI.DAG; 11231 SDLoc SL(N); 11232 11233 SDValue LHS = N->getOperand(0); 11234 SDValue RHS = N->getOperand(1); 11235 EVT VT = LHS.getValueType(); 11236 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 11237 11238 auto CRHS = dyn_cast<ConstantSDNode>(RHS); 11239 if (!CRHS) { 11240 CRHS = dyn_cast<ConstantSDNode>(LHS); 11241 if (CRHS) { 11242 std::swap(LHS, RHS); 11243 CC = getSetCCSwappedOperands(CC); 11244 } 11245 } 11246 11247 if (CRHS) { 11248 if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND && 11249 isBoolSGPR(LHS.getOperand(0))) { 11250 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1 11251 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc 11252 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1 11253 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc 11254 if ((CRHS->isAllOnes() && 11255 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) || 11256 (CRHS->isZero() && 11257 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE))) 11258 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), 11259 DAG.getConstant(-1, SL, MVT::i1)); 11260 if ((CRHS->isAllOnes() && 11261 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) || 11262 (CRHS->isZero() && 11263 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT))) 11264 return LHS.getOperand(0); 11265 } 11266 11267 const APInt &CRHSVal = CRHS->getAPIntValue(); 11268 if ((CC == ISD::SETEQ || CC == ISD::SETNE) && 11269 LHS.getOpcode() == ISD::SELECT && 11270 isa<ConstantSDNode>(LHS.getOperand(1)) && 11271 isa<ConstantSDNode>(LHS.getOperand(2)) && 11272 LHS.getConstantOperandVal(1) != 
LHS.getConstantOperandVal(2) && 11273 isBoolSGPR(LHS.getOperand(0))) { 11274 // Given CT != FT: 11275 // setcc (select cc, CT, CF), CF, eq => xor cc, -1 11276 // setcc (select cc, CT, CF), CF, ne => cc 11277 // setcc (select cc, CT, CF), CT, ne => xor cc, -1 11278 // setcc (select cc, CT, CF), CT, eq => cc 11279 const APInt &CT = LHS.getConstantOperandAPInt(1); 11280 const APInt &CF = LHS.getConstantOperandAPInt(2); 11281 11282 if ((CF == CRHSVal && CC == ISD::SETEQ) || 11283 (CT == CRHSVal && CC == ISD::SETNE)) 11284 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), 11285 DAG.getConstant(-1, SL, MVT::i1)); 11286 if ((CF == CRHSVal && CC == ISD::SETNE) || 11287 (CT == CRHSVal && CC == ISD::SETEQ)) 11288 return LHS.getOperand(0); 11289 } 11290 } 11291 11292 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && 11293 VT != MVT::f16)) 11294 return SDValue(); 11295 11296 // Match isinf/isfinite pattern 11297 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) 11298 // (fcmp one (fabs x), inf) -> (fp_class x, 11299 // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero) 11300 if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) { 11301 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 11302 if (!CRHS) 11303 return SDValue(); 11304 11305 const APFloat &APF = CRHS->getValueAPF(); 11306 if (APF.isInfinity() && !APF.isNegative()) { 11307 const unsigned IsInfMask = SIInstrFlags::P_INFINITY | 11308 SIInstrFlags::N_INFINITY; 11309 const unsigned IsFiniteMask = SIInstrFlags::N_ZERO | 11310 SIInstrFlags::P_ZERO | 11311 SIInstrFlags::N_NORMAL | 11312 SIInstrFlags::P_NORMAL | 11313 SIInstrFlags::N_SUBNORMAL | 11314 SIInstrFlags::P_SUBNORMAL; 11315 unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask; 11316 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), 11317 DAG.getConstant(Mask, SL, MVT::i32)); 11318 } 11319 } 11320 11321 return SDValue(); 11322 } 11323 11324 SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, 11325 DAGCombinerInfo &DCI) const { 11326 SelectionDAG &DAG = DCI.DAG; 11327 SDLoc SL(N); 11328 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; 11329 11330 SDValue Src = N->getOperand(0); 11331 SDValue Shift = N->getOperand(0); 11332 11333 // TODO: Extend type shouldn't matter (assuming legal types). 
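  // For example, cvt_f32_ubyte0 (srl x, 16) converts bits [23:16] of x, i.e.
  // byte 2 of the original value, so it can be rewritten as cvt_f32_ubyte2 x.
  // The ShiftOffset arithmetic below generalizes this to any byte-aligned
  // shift amount that still selects a whole byte inside the low 32 bits.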
11334 if (Shift.getOpcode() == ISD::ZERO_EXTEND) 11335 Shift = Shift.getOperand(0); 11336 11337 if (Shift.getOpcode() == ISD::SRL || Shift.getOpcode() == ISD::SHL) { 11338 // cvt_f32_ubyte1 (shl x, 8) -> cvt_f32_ubyte0 x 11339 // cvt_f32_ubyte3 (shl x, 16) -> cvt_f32_ubyte1 x 11340 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x 11341 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x 11342 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x 11343 if (auto *C = dyn_cast<ConstantSDNode>(Shift.getOperand(1))) { 11344 SDValue Shifted = DAG.getZExtOrTrunc(Shift.getOperand(0), 11345 SDLoc(Shift.getOperand(0)), MVT::i32); 11346 11347 unsigned ShiftOffset = 8 * Offset; 11348 if (Shift.getOpcode() == ISD::SHL) 11349 ShiftOffset -= C->getZExtValue(); 11350 else 11351 ShiftOffset += C->getZExtValue(); 11352 11353 if (ShiftOffset < 32 && (ShiftOffset % 8) == 0) { 11354 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + ShiftOffset / 8, SL, 11355 MVT::f32, Shifted); 11356 } 11357 } 11358 } 11359 11360 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11361 APInt DemandedBits = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); 11362 if (TLI.SimplifyDemandedBits(Src, DemandedBits, DCI)) { 11363 // We simplified Src. If this node is not dead, visit it again so it is 11364 // folded properly. 11365 if (N->getOpcode() != ISD::DELETED_NODE) 11366 DCI.AddToWorklist(N); 11367 return SDValue(N, 0); 11368 } 11369 11370 // Handle (or x, (srl y, 8)) pattern when known bits are zero. 11371 if (SDValue DemandedSrc = 11372 TLI.SimplifyMultipleUseDemandedBits(Src, DemandedBits, DAG)) 11373 return DAG.getNode(N->getOpcode(), SL, MVT::f32, DemandedSrc); 11374 11375 return SDValue(); 11376 } 11377 11378 SDValue SITargetLowering::performClampCombine(SDNode *N, 11379 DAGCombinerInfo &DCI) const { 11380 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); 11381 if (!CSrc) 11382 return SDValue(); 11383 11384 const MachineFunction &MF = DCI.DAG.getMachineFunction(); 11385 const APFloat &F = CSrc->getValueAPF(); 11386 APFloat Zero = APFloat::getZero(F.getSemantics()); 11387 if (F < Zero || 11388 (F.isNaN() && MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) { 11389 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0)); 11390 } 11391 11392 APFloat One(F.getSemantics(), "1.0"); 11393 if (F > One) 11394 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0)); 11395 11396 return SDValue(CSrc, 0); 11397 } 11398 11399 11400 SDValue SITargetLowering::PerformDAGCombine(SDNode *N, 11401 DAGCombinerInfo &DCI) const { 11402 if (getTargetMachine().getOptLevel() == CodeGenOpt::None) 11403 return SDValue(); 11404 switch (N->getOpcode()) { 11405 case ISD::ADD: 11406 return performAddCombine(N, DCI); 11407 case ISD::SUB: 11408 return performSubCombine(N, DCI); 11409 case ISD::ADDCARRY: 11410 case ISD::SUBCARRY: 11411 return performAddCarrySubCarryCombine(N, DCI); 11412 case ISD::FADD: 11413 return performFAddCombine(N, DCI); 11414 case ISD::FSUB: 11415 return performFSubCombine(N, DCI); 11416 case ISD::SETCC: 11417 return performSetCCCombine(N, DCI); 11418 case ISD::FMAXNUM: 11419 case ISD::FMINNUM: 11420 case ISD::FMAXNUM_IEEE: 11421 case ISD::FMINNUM_IEEE: 11422 case ISD::SMAX: 11423 case ISD::SMIN: 11424 case ISD::UMAX: 11425 case ISD::UMIN: 11426 case AMDGPUISD::FMIN_LEGACY: 11427 case AMDGPUISD::FMAX_LEGACY: 11428 return performMinMaxCombine(N, DCI); 11429 case ISD::FMA: 11430 return performFMACombine(N, DCI); 11431 case ISD::AND: 11432 return performAndCombine(N, DCI); 11433 case 
ISD::OR: 11434 return performOrCombine(N, DCI); 11435 case ISD::XOR: 11436 return performXorCombine(N, DCI); 11437 case ISD::ZERO_EXTEND: 11438 return performZeroExtendCombine(N, DCI); 11439 case ISD::SIGN_EXTEND_INREG: 11440 return performSignExtendInRegCombine(N , DCI); 11441 case AMDGPUISD::FP_CLASS: 11442 return performClassCombine(N, DCI); 11443 case ISD::FCANONICALIZE: 11444 return performFCanonicalizeCombine(N, DCI); 11445 case AMDGPUISD::RCP: 11446 return performRcpCombine(N, DCI); 11447 case AMDGPUISD::FRACT: 11448 case AMDGPUISD::RSQ: 11449 case AMDGPUISD::RCP_LEGACY: 11450 case AMDGPUISD::RCP_IFLAG: 11451 case AMDGPUISD::RSQ_CLAMP: 11452 case AMDGPUISD::LDEXP: { 11453 // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted 11454 SDValue Src = N->getOperand(0); 11455 if (Src.isUndef()) 11456 return Src; 11457 break; 11458 } 11459 case ISD::SINT_TO_FP: 11460 case ISD::UINT_TO_FP: 11461 return performUCharToFloatCombine(N, DCI); 11462 case AMDGPUISD::CVT_F32_UBYTE0: 11463 case AMDGPUISD::CVT_F32_UBYTE1: 11464 case AMDGPUISD::CVT_F32_UBYTE2: 11465 case AMDGPUISD::CVT_F32_UBYTE3: 11466 return performCvtF32UByteNCombine(N, DCI); 11467 case AMDGPUISD::FMED3: 11468 return performFMed3Combine(N, DCI); 11469 case AMDGPUISD::CVT_PKRTZ_F16_F32: 11470 return performCvtPkRTZCombine(N, DCI); 11471 case AMDGPUISD::CLAMP: 11472 return performClampCombine(N, DCI); 11473 case ISD::SCALAR_TO_VECTOR: { 11474 SelectionDAG &DAG = DCI.DAG; 11475 EVT VT = N->getValueType(0); 11476 11477 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x)) 11478 if (VT == MVT::v2i16 || VT == MVT::v2f16) { 11479 SDLoc SL(N); 11480 SDValue Src = N->getOperand(0); 11481 EVT EltVT = Src.getValueType(); 11482 if (EltVT == MVT::f16) 11483 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src); 11484 11485 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src); 11486 return DAG.getNode(ISD::BITCAST, SL, VT, Ext); 11487 } 11488 11489 break; 11490 } 11491 case ISD::EXTRACT_VECTOR_ELT: 11492 return performExtractVectorEltCombine(N, DCI); 11493 case ISD::INSERT_VECTOR_ELT: 11494 return performInsertVectorEltCombine(N, DCI); 11495 case ISD::LOAD: { 11496 if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI)) 11497 return Widended; 11498 LLVM_FALLTHROUGH; 11499 } 11500 default: { 11501 if (!DCI.isBeforeLegalize()) { 11502 if (MemSDNode *MemNode = dyn_cast<MemSDNode>(N)) 11503 return performMemSDNodeCombine(MemNode, DCI); 11504 } 11505 11506 break; 11507 } 11508 } 11509 11510 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 11511 } 11512 11513 /// Helper function for adjustWritemask 11514 static unsigned SubIdx2Lane(unsigned Idx) { 11515 switch (Idx) { 11516 default: return ~0u; 11517 case AMDGPU::sub0: return 0; 11518 case AMDGPU::sub1: return 1; 11519 case AMDGPU::sub2: return 2; 11520 case AMDGPU::sub3: return 3; 11521 case AMDGPU::sub4: return 4; // Possible with TFE/LWE 11522 } 11523 } 11524 11525 /// Adjust the writemask of MIMG instructions 11526 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, 11527 SelectionDAG &DAG) const { 11528 unsigned Opcode = Node->getMachineOpcode(); 11529 11530 // Subtract 1 because the vdata output is not a MachineSDNode operand. 
11531 int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1; 11532 if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx)) 11533 return Node; // not implemented for D16 11534 11535 SDNode *Users[5] = { nullptr }; 11536 unsigned Lane = 0; 11537 unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1; 11538 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); 11539 unsigned NewDmask = 0; 11540 unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1; 11541 unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1; 11542 bool UsesTFC = ((int(TFEIdx) >= 0 && Node->getConstantOperandVal(TFEIdx)) || 11543 Node->getConstantOperandVal(LWEIdx)) 11544 ? true 11545 : false; 11546 unsigned TFCLane = 0; 11547 bool HasChain = Node->getNumValues() > 1; 11548 11549 if (OldDmask == 0) { 11550 // These are folded out, but on the chance it happens don't assert. 11551 return Node; 11552 } 11553 11554 unsigned OldBitsSet = countPopulation(OldDmask); 11555 // Work out which is the TFE/LWE lane if that is enabled. 11556 if (UsesTFC) { 11557 TFCLane = OldBitsSet; 11558 } 11559 11560 // Try to figure out the used register components 11561 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); 11562 I != E; ++I) { 11563 11564 // Don't look at users of the chain. 11565 if (I.getUse().getResNo() != 0) 11566 continue; 11567 11568 // Abort if we can't understand the usage 11569 if (!I->isMachineOpcode() || 11570 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) 11571 return Node; 11572 11573 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used. 11574 // Note that subregs are packed, i.e. Lane==0 is the first bit set 11575 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit 11576 // set, etc. 11577 Lane = SubIdx2Lane(I->getConstantOperandVal(1)); 11578 if (Lane == ~0u) 11579 return Node; 11580 11581 // Check if the use is for the TFE/LWE generated result at VGPRn+1. 11582 if (UsesTFC && Lane == TFCLane) { 11583 Users[Lane] = *I; 11584 } else { 11585 // Set which texture component corresponds to the lane. 11586 unsigned Comp; 11587 for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) { 11588 Comp = countTrailingZeros(Dmask); 11589 Dmask &= ~(1 << Comp); 11590 } 11591 11592 // Abort if we have more than one user per component. 11593 if (Users[Lane]) 11594 return Node; 11595 11596 Users[Lane] = *I; 11597 NewDmask |= 1 << Comp; 11598 } 11599 } 11600 11601 // Don't allow 0 dmask, as hardware assumes one channel enabled. 11602 bool NoChannels = !NewDmask; 11603 if (NoChannels) { 11604 if (!UsesTFC) { 11605 // No uses of the result and not using TFC. Then do nothing. 11606 return Node; 11607 } 11608 // If the original dmask has one channel - then nothing to do 11609 if (OldBitsSet == 1) 11610 return Node; 11611 // Use an arbitrary dmask - required for the instruction to work 11612 NewDmask = 1; 11613 } 11614 // Abort if there's no change 11615 if (NewDmask == OldDmask) 11616 return Node; 11617 11618 unsigned BitsSet = countPopulation(NewDmask); 11619 11620 // Check for TFE or LWE - increase the number of channels by one to account 11621 // for the extra return value 11622 // This will need adjustment for D16 if this is also included in 11623 // adjustWriteMask (this function) but at present D16 are excluded. 
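  // For example, an image load with dmask = 0xf where only the .x and .z
  // results are actually extracted is rewritten to dmask = 0x5, shrinking the
  // result from four data registers to two (plus one more when TFE/LWE is
  // enabled).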
11624 unsigned NewChannels = BitsSet + UsesTFC; 11625 11626 int NewOpcode = 11627 AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels); 11628 assert(NewOpcode != -1 && 11629 NewOpcode != static_cast<int>(Node->getMachineOpcode()) && 11630 "failed to find equivalent MIMG op"); 11631 11632 // Adjust the writemask in the node 11633 SmallVector<SDValue, 12> Ops; 11634 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); 11635 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); 11636 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); 11637 11638 MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT(); 11639 11640 MVT ResultVT = NewChannels == 1 ? 11641 SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 : 11642 NewChannels == 5 ? 8 : NewChannels); 11643 SDVTList NewVTList = HasChain ? 11644 DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT); 11645 11646 11647 MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node), 11648 NewVTList, Ops); 11649 11650 if (HasChain) { 11651 // Update chain. 11652 DAG.setNodeMemRefs(NewNode, Node->memoperands()); 11653 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1)); 11654 } 11655 11656 if (NewChannels == 1) { 11657 assert(Node->hasNUsesOfValue(1, 0)); 11658 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY, 11659 SDLoc(Node), Users[Lane]->getValueType(0), 11660 SDValue(NewNode, 0)); 11661 DAG.ReplaceAllUsesWith(Users[Lane], Copy); 11662 return nullptr; 11663 } 11664 11665 // Update the users of the node with the new indices 11666 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) { 11667 SDNode *User = Users[i]; 11668 if (!User) { 11669 // Handle the special case of NoChannels. We set NewDmask to 1 above, but 11670 // Users[0] is still nullptr because channel 0 doesn't really have a use. 11671 if (i || !NoChannels) 11672 continue; 11673 } else { 11674 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32); 11675 DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op); 11676 } 11677 11678 switch (Idx) { 11679 default: break; 11680 case AMDGPU::sub0: Idx = AMDGPU::sub1; break; 11681 case AMDGPU::sub1: Idx = AMDGPU::sub2; break; 11682 case AMDGPU::sub2: Idx = AMDGPU::sub3; break; 11683 case AMDGPU::sub3: Idx = AMDGPU::sub4; break; 11684 } 11685 } 11686 11687 DAG.RemoveDeadNode(Node); 11688 return nullptr; 11689 } 11690 11691 static bool isFrameIndexOp(SDValue Op) { 11692 if (Op.getOpcode() == ISD::AssertZext) 11693 Op = Op.getOperand(0); 11694 11695 return isa<FrameIndexSDNode>(Op); 11696 } 11697 11698 /// Legalize target independent instructions (e.g. INSERT_SUBREG) 11699 /// with frame index operands. 11700 /// LLVM assumes that inputs are to these instructions are registers. 11701 SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, 11702 SelectionDAG &DAG) const { 11703 if (Node->getOpcode() == ISD::CopyToReg) { 11704 RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1)); 11705 SDValue SrcVal = Node->getOperand(2); 11706 11707 // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have 11708 // to try understanding copies to physical registers. 
11709 if (SrcVal.getValueType() == MVT::i1 && DestReg->getReg().isPhysical()) { 11710 SDLoc SL(Node); 11711 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 11712 SDValue VReg = DAG.getRegister( 11713 MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1); 11714 11715 SDNode *Glued = Node->getGluedNode(); 11716 SDValue ToVReg 11717 = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal, 11718 SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0)); 11719 SDValue ToResultReg 11720 = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0), 11721 VReg, ToVReg.getValue(1)); 11722 DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode()); 11723 DAG.RemoveDeadNode(Node); 11724 return ToResultReg.getNode(); 11725 } 11726 } 11727 11728 SmallVector<SDValue, 8> Ops; 11729 for (unsigned i = 0; i < Node->getNumOperands(); ++i) { 11730 if (!isFrameIndexOp(Node->getOperand(i))) { 11731 Ops.push_back(Node->getOperand(i)); 11732 continue; 11733 } 11734 11735 SDLoc DL(Node); 11736 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, 11737 Node->getOperand(i).getValueType(), 11738 Node->getOperand(i)), 0)); 11739 } 11740 11741 return DAG.UpdateNodeOperands(Node, Ops); 11742 } 11743 11744 /// Fold the instructions after selecting them. 11745 /// Returns null if users were already updated. 11746 SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, 11747 SelectionDAG &DAG) const { 11748 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 11749 unsigned Opcode = Node->getMachineOpcode(); 11750 11751 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && 11752 !TII->isGather4(Opcode) && 11753 AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) != -1) { 11754 return adjustWritemask(Node, DAG); 11755 } 11756 11757 if (Opcode == AMDGPU::INSERT_SUBREG || 11758 Opcode == AMDGPU::REG_SEQUENCE) { 11759 legalizeTargetIndependentNode(Node, DAG); 11760 return Node; 11761 } 11762 11763 switch (Opcode) { 11764 case AMDGPU::V_DIV_SCALE_F32_e64: 11765 case AMDGPU::V_DIV_SCALE_F64_e64: { 11766 // Satisfy the operand register constraint when one of the inputs is 11767 // undefined. Ordinarily each undef value will have its own implicit_def of 11768 // a vreg, so force these to use a single register. 11769 SDValue Src0 = Node->getOperand(1); 11770 SDValue Src1 = Node->getOperand(3); 11771 SDValue Src2 = Node->getOperand(5); 11772 11773 if ((Src0.isMachineOpcode() && 11774 Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) && 11775 (Src0 == Src1 || Src0 == Src2)) 11776 break; 11777 11778 MVT VT = Src0.getValueType().getSimpleVT(); 11779 const TargetRegisterClass *RC = 11780 getRegClassFor(VT, Src0.getNode()->isDivergent()); 11781 11782 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 11783 SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT); 11784 11785 SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node), 11786 UndefReg, Src0, SDValue()); 11787 11788 // src0 must be the same register as src1 or src2, even if the value is 11789 // undefined, so make sure we don't violate this constraint. 
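    // If src0 is undef, reuse src1 or src2 (whichever is defined) for it; if
    // all three inputs are undef, point src0 and src1 at the same fresh
    // register so that src0 still matches one of the other sources.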
11790 if (Src0.isMachineOpcode() && 11791 Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) { 11792 if (Src1.isMachineOpcode() && 11793 Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) 11794 Src0 = Src1; 11795 else if (Src2.isMachineOpcode() && 11796 Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) 11797 Src0 = Src2; 11798 else { 11799 assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF); 11800 Src0 = UndefReg; 11801 Src1 = UndefReg; 11802 } 11803 } else 11804 break; 11805 11806 SmallVector<SDValue, 9> Ops(Node->op_begin(), Node->op_end()); 11807 Ops[1] = Src0; 11808 Ops[3] = Src1; 11809 Ops[5] = Src2; 11810 Ops.push_back(ImpDef.getValue(1)); 11811 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops); 11812 } 11813 default: 11814 break; 11815 } 11816 11817 return Node; 11818 } 11819 11820 // Any MIMG instructions that use tfe or lwe require an initialization of the 11821 // result register that will be written in the case of a memory access failure. 11822 // The required code is also added to tie this init code to the result of the 11823 // img instruction. 11824 void SITargetLowering::AddIMGInit(MachineInstr &MI) const { 11825 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 11826 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 11827 MachineRegisterInfo &MRI = MI.getMF()->getRegInfo(); 11828 MachineBasicBlock &MBB = *MI.getParent(); 11829 11830 MachineOperand *TFE = TII->getNamedOperand(MI, AMDGPU::OpName::tfe); 11831 MachineOperand *LWE = TII->getNamedOperand(MI, AMDGPU::OpName::lwe); 11832 MachineOperand *D16 = TII->getNamedOperand(MI, AMDGPU::OpName::d16); 11833 11834 if (!TFE && !LWE) // intersect_ray 11835 return; 11836 11837 unsigned TFEVal = TFE ? TFE->getImm() : 0; 11838 unsigned LWEVal = LWE->getImm(); 11839 unsigned D16Val = D16 ? D16->getImm() : 0; 11840 11841 if (!TFEVal && !LWEVal) 11842 return; 11843 11844 // At least one of TFE or LWE are non-zero 11845 // We have to insert a suitable initialization of the result value and 11846 // tie this to the dest of the image instruction. 11847 11848 const DebugLoc &DL = MI.getDebugLoc(); 11849 11850 int DstIdx = 11851 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); 11852 11853 // Calculate which dword we have to initialize to 0. 11854 MachineOperand *MO_Dmask = TII->getNamedOperand(MI, AMDGPU::OpName::dmask); 11855 11856 // check that dmask operand is found. 11857 assert(MO_Dmask && "Expected dmask operand in instruction"); 11858 11859 unsigned dmask = MO_Dmask->getImm(); 11860 // Determine the number of active lanes taking into account the 11861 // Gather4 special case 11862 unsigned ActiveLanes = TII->isGather4(MI) ? 4 : countPopulation(dmask); 11863 11864 bool Packed = !Subtarget->hasUnpackedD16VMem(); 11865 11866 unsigned InitIdx = 11867 D16Val && Packed ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1; 11868 11869 // Abandon attempt if the dst size isn't large enough 11870 // - this is in fact an error but this is picked up elsewhere and 11871 // reported correctly. 11872 uint32_t DstSize = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32; 11873 if (DstSize < InitIdx) 11874 return; 11875 11876 // Create a register for the initialization value. 
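  // The loop below builds a chain of INSERT_SUBREGs, zeroing one dword of this
  // value per iteration; the end of the chain is added as an implicit operand
  // tied to vdata so the initialization cannot be separated from the image
  // instruction.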
11877 Register PrevDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx)); 11878 unsigned NewDst = 0; // Final initialized value will be in here 11879 11880 // If PRTStrictNull feature is enabled (the default) then initialize 11881 // all the result registers to 0, otherwise just the error indication 11882 // register (VGPRn+1) 11883 unsigned SizeLeft = Subtarget->usePRTStrictNull() ? InitIdx : 1; 11884 unsigned CurrIdx = Subtarget->usePRTStrictNull() ? 0 : (InitIdx - 1); 11885 11886 BuildMI(MBB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), PrevDst); 11887 for (; SizeLeft; SizeLeft--, CurrIdx++) { 11888 NewDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx)); 11889 // Initialize dword 11890 Register SubReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 11891 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), SubReg) 11892 .addImm(0); 11893 // Insert into the super-reg 11894 BuildMI(MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewDst) 11895 .addReg(PrevDst) 11896 .addReg(SubReg) 11897 .addImm(SIRegisterInfo::getSubRegFromChannel(CurrIdx)); 11898 11899 PrevDst = NewDst; 11900 } 11901 11902 // Add as an implicit operand 11903 MI.addOperand(MachineOperand::CreateReg(NewDst, false, true)); 11904 11905 // Tie the just added implicit operand to the dst 11906 MI.tieOperands(DstIdx, MI.getNumOperands() - 1); 11907 } 11908 11909 /// Assign the register class depending on the number of 11910 /// bits set in the writemask 11911 void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, 11912 SDNode *Node) const { 11913 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 11914 11915 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 11916 11917 if (TII->isVOP3(MI.getOpcode())) { 11918 // Make sure constant bus requirements are respected. 11919 TII->legalizeOperandsVOP3(MRI, MI); 11920 11921 // Prefer VGPRs over AGPRs in mAI instructions where possible. 11922 // This saves a chain-copy of registers and better balance register 11923 // use between vgpr and agpr as agpr tuples tend to be big. 11924 if (MI.getDesc().OpInfo) { 11925 unsigned Opc = MI.getOpcode(); 11926 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 11927 for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 11928 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) { 11929 if (I == -1) 11930 break; 11931 MachineOperand &Op = MI.getOperand(I); 11932 if (!Op.isReg() || !Op.getReg().isVirtual()) 11933 continue; 11934 auto *RC = TRI->getRegClassForReg(MRI, Op.getReg()); 11935 if (!TRI->hasAGPRs(RC)) 11936 continue; 11937 auto *Src = MRI.getUniqueVRegDef(Op.getReg()); 11938 if (!Src || !Src->isCopy() || 11939 !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg())) 11940 continue; 11941 auto *NewRC = TRI->getEquivalentVGPRClass(RC); 11942 // All uses of agpr64 and agpr32 can also accept vgpr except for 11943 // v_accvgpr_read, but we do not produce agpr reads during selection, 11944 // so no use checks are needed. 11945 MRI.setRegClass(Op.getReg(), NewRC); 11946 } 11947 11948 // Resolve the rest of AV operands to AGPRs. 
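      // For MFMA, src2 is the accumulator input; when it is tied to the
      // destination, the destination must be moved to the matching AGPR class
      // as well.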
11949 if (auto *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2)) { 11950 if (Src2->isReg() && Src2->getReg().isVirtual()) { 11951 auto *RC = TRI->getRegClassForReg(MRI, Src2->getReg()); 11952 if (TRI->isVectorSuperClass(RC)) { 11953 auto *NewRC = TRI->getEquivalentAGPRClass(RC); 11954 MRI.setRegClass(Src2->getReg(), NewRC); 11955 if (Src2->isTied()) 11956 MRI.setRegClass(MI.getOperand(0).getReg(), NewRC); 11957 } 11958 } 11959 } 11960 } 11961 11962 return; 11963 } 11964 11965 if (TII->isMIMG(MI)) { 11966 if (!MI.mayStore()) 11967 AddIMGInit(MI); 11968 TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::vaddr); 11969 } 11970 } 11971 11972 static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, 11973 uint64_t Val) { 11974 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); 11975 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); 11976 } 11977 11978 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, 11979 const SDLoc &DL, 11980 SDValue Ptr) const { 11981 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 11982 11983 // Build the half of the subregister with the constants before building the 11984 // full 128-bit register. If we are building multiple resource descriptors, 11985 // this will allow CSEing of the 2-component register. 11986 const SDValue Ops0[] = { 11987 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), 11988 buildSMovImm32(DAG, DL, 0), 11989 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 11990 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), 11991 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) 11992 }; 11993 11994 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, 11995 MVT::v2i32, Ops0), 0); 11996 11997 // Combine the constants and the pointer. 11998 const SDValue Ops1[] = { 11999 DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), 12000 Ptr, 12001 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), 12002 SubRegHi, 12003 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) 12004 }; 12005 12006 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); 12007 } 12008 12009 /// Return a resource descriptor with the 'Add TID' bit enabled 12010 /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] 12011 /// of the resource descriptor) to create an offset, which is added to 12012 /// the resource pointer. 
12013 MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, 12014 SDValue Ptr, uint32_t RsrcDword1, 12015 uint64_t RsrcDword2And3) const { 12016 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); 12017 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); 12018 if (RsrcDword1) { 12019 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, 12020 DAG.getConstant(RsrcDword1, DL, MVT::i32)), 12021 0); 12022 } 12023 12024 SDValue DataLo = buildSMovImm32(DAG, DL, 12025 RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); 12026 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); 12027 12028 const SDValue Ops[] = { 12029 DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), 12030 PtrLo, 12031 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 12032 PtrHi, 12033 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), 12034 DataLo, 12035 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), 12036 DataHi, 12037 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) 12038 }; 12039 12040 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); 12041 } 12042 12043 //===----------------------------------------------------------------------===// 12044 // SI Inline Assembly Support 12045 //===----------------------------------------------------------------------===// 12046 12047 std::pair<unsigned, const TargetRegisterClass *> 12048 SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_, 12049 StringRef Constraint, 12050 MVT VT) const { 12051 const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(TRI_); 12052 12053 const TargetRegisterClass *RC = nullptr; 12054 if (Constraint.size() == 1) { 12055 const unsigned BitWidth = VT.getSizeInBits(); 12056 switch (Constraint[0]) { 12057 default: 12058 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 12059 case 's': 12060 case 'r': 12061 switch (BitWidth) { 12062 case 16: 12063 RC = &AMDGPU::SReg_32RegClass; 12064 break; 12065 case 64: 12066 RC = &AMDGPU::SGPR_64RegClass; 12067 break; 12068 default: 12069 RC = SIRegisterInfo::getSGPRClassForBitWidth(BitWidth); 12070 if (!RC) 12071 return std::make_pair(0U, nullptr); 12072 break; 12073 } 12074 break; 12075 case 'v': 12076 switch (BitWidth) { 12077 case 16: 12078 RC = &AMDGPU::VGPR_32RegClass; 12079 break; 12080 default: 12081 RC = TRI->getVGPRClassForBitWidth(BitWidth); 12082 if (!RC) 12083 return std::make_pair(0U, nullptr); 12084 break; 12085 } 12086 break; 12087 case 'a': 12088 if (!Subtarget->hasMAIInsts()) 12089 break; 12090 switch (BitWidth) { 12091 case 16: 12092 RC = &AMDGPU::AGPR_32RegClass; 12093 break; 12094 default: 12095 RC = TRI->getAGPRClassForBitWidth(BitWidth); 12096 if (!RC) 12097 return std::make_pair(0U, nullptr); 12098 break; 12099 } 12100 break; 12101 } 12102 // We actually support i128, i16 and f16 as inline parameters 12103 // even if they are not reported as legal 12104 if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 || 12105 VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16)) 12106 return std::make_pair(0U, RC); 12107 } 12108 12109 if (Constraint.startswith("{") && Constraint.endswith("}")) { 12110 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2); 12111 if (RegName.consume_front("v")) { 12112 RC = &AMDGPU::VGPR_32RegClass; 12113 } else if (RegName.consume_front("s")) { 12114 RC = &AMDGPU::SGPR_32RegClass; 12115 } else if (RegName.consume_front("a")) { 12116 RC = &AMDGPU::AGPR_32RegClass; 12117 } 12118 12119 if 
(RC) { 12120 uint32_t Idx; 12121 if (RegName.consume_front("[")) { 12122 uint32_t End; 12123 bool Failed = RegName.consumeInteger(10, Idx); 12124 Failed |= !RegName.consume_front(":"); 12125 Failed |= RegName.consumeInteger(10, End); 12126 Failed |= !RegName.consume_back("]"); 12127 if (!Failed) { 12128 uint32_t Width = (End - Idx + 1) * 32; 12129 MCRegister Reg = RC->getRegister(Idx); 12130 if (SIRegisterInfo::isVGPRClass(RC)) 12131 RC = TRI->getVGPRClassForBitWidth(Width); 12132 else if (SIRegisterInfo::isSGPRClass(RC)) 12133 RC = TRI->getSGPRClassForBitWidth(Width); 12134 else if (SIRegisterInfo::isAGPRClass(RC)) 12135 RC = TRI->getAGPRClassForBitWidth(Width); 12136 if (RC) { 12137 Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, RC); 12138 return std::make_pair(Reg, RC); 12139 } 12140 } 12141 } else { 12142 bool Failed = RegName.getAsInteger(10, Idx); 12143 if (!Failed && Idx < RC->getNumRegs()) 12144 return std::make_pair(RC->getRegister(Idx), RC); 12145 } 12146 } 12147 } 12148 12149 auto Ret = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 12150 if (Ret.first) 12151 Ret.second = TRI->getPhysRegClass(Ret.first); 12152 12153 return Ret; 12154 } 12155 12156 static bool isImmConstraint(StringRef Constraint) { 12157 if (Constraint.size() == 1) { 12158 switch (Constraint[0]) { 12159 default: break; 12160 case 'I': 12161 case 'J': 12162 case 'A': 12163 case 'B': 12164 case 'C': 12165 return true; 12166 } 12167 } else if (Constraint == "DA" || 12168 Constraint == "DB") { 12169 return true; 12170 } 12171 return false; 12172 } 12173 12174 SITargetLowering::ConstraintType 12175 SITargetLowering::getConstraintType(StringRef Constraint) const { 12176 if (Constraint.size() == 1) { 12177 switch (Constraint[0]) { 12178 default: break; 12179 case 's': 12180 case 'v': 12181 case 'a': 12182 return C_RegisterClass; 12183 } 12184 } 12185 if (isImmConstraint(Constraint)) { 12186 return C_Other; 12187 } 12188 return TargetLowering::getConstraintType(Constraint); 12189 } 12190 12191 static uint64_t clearUnusedBits(uint64_t Val, unsigned Size) { 12192 if (!AMDGPU::isInlinableIntLiteral(Val)) { 12193 Val = Val & maskTrailingOnes<uint64_t>(Size); 12194 } 12195 return Val; 12196 } 12197 12198 void SITargetLowering::LowerAsmOperandForConstraint(SDValue Op, 12199 std::string &Constraint, 12200 std::vector<SDValue> &Ops, 12201 SelectionDAG &DAG) const { 12202 if (isImmConstraint(Constraint)) { 12203 uint64_t Val; 12204 if (getAsmOperandConstVal(Op, Val) && 12205 checkAsmConstraintVal(Op, Constraint, Val)) { 12206 Val = clearUnusedBits(Val, Op.getScalarValueSizeInBits()); 12207 Ops.push_back(DAG.getTargetConstant(Val, SDLoc(Op), MVT::i64)); 12208 } 12209 } else { 12210 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 12211 } 12212 } 12213 12214 bool SITargetLowering::getAsmOperandConstVal(SDValue Op, uint64_t &Val) const { 12215 unsigned Size = Op.getScalarValueSizeInBits(); 12216 if (Size > 64) 12217 return false; 12218 12219 if (Size == 16 && !Subtarget->has16BitInsts()) 12220 return false; 12221 12222 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 12223 Val = C->getSExtValue(); 12224 return true; 12225 } 12226 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) { 12227 Val = C->getValueAPF().bitcastToAPInt().getSExtValue(); 12228 return true; 12229 } 12230 if (BuildVectorSDNode *V = dyn_cast<BuildVectorSDNode>(Op)) { 12231 if (Size != 16 || Op.getNumOperands() != 2) 12232 return false; 12233 if (Op.getOperand(0).isUndef() || Op.getOperand(1).isUndef()) 
12234 return false; 12235 if (ConstantSDNode *C = V->getConstantSplatNode()) { 12236 Val = C->getSExtValue(); 12237 return true; 12238 } 12239 if (ConstantFPSDNode *C = V->getConstantFPSplatNode()) { 12240 Val = C->getValueAPF().bitcastToAPInt().getSExtValue(); 12241 return true; 12242 } 12243 } 12244 12245 return false; 12246 } 12247 12248 bool SITargetLowering::checkAsmConstraintVal(SDValue Op, 12249 const std::string &Constraint, 12250 uint64_t Val) const { 12251 if (Constraint.size() == 1) { 12252 switch (Constraint[0]) { 12253 case 'I': 12254 return AMDGPU::isInlinableIntLiteral(Val); 12255 case 'J': 12256 return isInt<16>(Val); 12257 case 'A': 12258 return checkAsmConstraintValA(Op, Val); 12259 case 'B': 12260 return isInt<32>(Val); 12261 case 'C': 12262 return isUInt<32>(clearUnusedBits(Val, Op.getScalarValueSizeInBits())) || 12263 AMDGPU::isInlinableIntLiteral(Val); 12264 default: 12265 break; 12266 } 12267 } else if (Constraint.size() == 2) { 12268 if (Constraint == "DA") { 12269 int64_t HiBits = static_cast<int32_t>(Val >> 32); 12270 int64_t LoBits = static_cast<int32_t>(Val); 12271 return checkAsmConstraintValA(Op, HiBits, 32) && 12272 checkAsmConstraintValA(Op, LoBits, 32); 12273 } 12274 if (Constraint == "DB") { 12275 return true; 12276 } 12277 } 12278 llvm_unreachable("Invalid asm constraint"); 12279 } 12280 12281 bool SITargetLowering::checkAsmConstraintValA(SDValue Op, 12282 uint64_t Val, 12283 unsigned MaxSize) const { 12284 unsigned Size = std::min<unsigned>(Op.getScalarValueSizeInBits(), MaxSize); 12285 bool HasInv2Pi = Subtarget->hasInv2PiInlineImm(); 12286 if ((Size == 16 && AMDGPU::isInlinableLiteral16(Val, HasInv2Pi)) || 12287 (Size == 32 && AMDGPU::isInlinableLiteral32(Val, HasInv2Pi)) || 12288 (Size == 64 && AMDGPU::isInlinableLiteral64(Val, HasInv2Pi))) { 12289 return true; 12290 } 12291 return false; 12292 } 12293 12294 static int getAlignedAGPRClassID(unsigned UnalignedClassID) { 12295 switch (UnalignedClassID) { 12296 case AMDGPU::VReg_64RegClassID: 12297 return AMDGPU::VReg_64_Align2RegClassID; 12298 case AMDGPU::VReg_96RegClassID: 12299 return AMDGPU::VReg_96_Align2RegClassID; 12300 case AMDGPU::VReg_128RegClassID: 12301 return AMDGPU::VReg_128_Align2RegClassID; 12302 case AMDGPU::VReg_160RegClassID: 12303 return AMDGPU::VReg_160_Align2RegClassID; 12304 case AMDGPU::VReg_192RegClassID: 12305 return AMDGPU::VReg_192_Align2RegClassID; 12306 case AMDGPU::VReg_224RegClassID: 12307 return AMDGPU::VReg_224_Align2RegClassID; 12308 case AMDGPU::VReg_256RegClassID: 12309 return AMDGPU::VReg_256_Align2RegClassID; 12310 case AMDGPU::VReg_512RegClassID: 12311 return AMDGPU::VReg_512_Align2RegClassID; 12312 case AMDGPU::VReg_1024RegClassID: 12313 return AMDGPU::VReg_1024_Align2RegClassID; 12314 case AMDGPU::AReg_64RegClassID: 12315 return AMDGPU::AReg_64_Align2RegClassID; 12316 case AMDGPU::AReg_96RegClassID: 12317 return AMDGPU::AReg_96_Align2RegClassID; 12318 case AMDGPU::AReg_128RegClassID: 12319 return AMDGPU::AReg_128_Align2RegClassID; 12320 case AMDGPU::AReg_160RegClassID: 12321 return AMDGPU::AReg_160_Align2RegClassID; 12322 case AMDGPU::AReg_192RegClassID: 12323 return AMDGPU::AReg_192_Align2RegClassID; 12324 case AMDGPU::AReg_256RegClassID: 12325 return AMDGPU::AReg_256_Align2RegClassID; 12326 case AMDGPU::AReg_512RegClassID: 12327 return AMDGPU::AReg_512_Align2RegClassID; 12328 case AMDGPU::AReg_1024RegClassID: 12329 return AMDGPU::AReg_1024_Align2RegClassID; 12330 default: 12331 return -1; 12332 } 12333 } 12334 12335 // Figure out which registers should be 
reserved for stack access. Only after 12336 // the function is legalized do we know all of the non-spill stack objects or if 12337 // calls are present. 12338 void SITargetLowering::finalizeLowering(MachineFunction &MF) const { 12339 MachineRegisterInfo &MRI = MF.getRegInfo(); 12340 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 12341 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 12342 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 12343 const SIInstrInfo *TII = ST.getInstrInfo(); 12344 12345 if (Info->isEntryFunction()) { 12346 // Callable functions have fixed registers used for stack access. 12347 reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info); 12348 } 12349 12350 assert(!TRI->isSubRegister(Info->getScratchRSrcReg(), 12351 Info->getStackPtrOffsetReg())); 12352 if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG) 12353 MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg()); 12354 12355 // We need to worry about replacing the default register with itself in case 12356 // of MIR testcases missing the MFI. 12357 if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG) 12358 MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg()); 12359 12360 if (Info->getFrameOffsetReg() != AMDGPU::FP_REG) 12361 MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg()); 12362 12363 Info->limitOccupancy(MF); 12364 12365 if (ST.isWave32() && !MF.empty()) { 12366 for (auto &MBB : MF) { 12367 for (auto &MI : MBB) { 12368 TII->fixImplicitOperands(MI); 12369 } 12370 } 12371 } 12372 12373 // FIXME: This is a hack to fixup AGPR classes to use the properly aligned 12374 // classes if required. Ideally the register class constraints would differ 12375 // per-subtarget, but there's no easy way to achieve that right now. This is 12376 // not a problem for VGPRs because the correctly aligned VGPR class is implied 12377 // from using them as the register class for legal types. 12378 if (ST.needsAlignedVGPRs()) { 12379 for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) { 12380 const Register Reg = Register::index2VirtReg(I); 12381 const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg); 12382 if (!RC) 12383 continue; 12384 int NewClassID = getAlignedAGPRClassID(RC->getID()); 12385 if (NewClassID != -1) 12386 MRI.setRegClass(Reg, TRI->getRegClass(NewClassID)); 12387 } 12388 } 12389 12390 TargetLoweringBase::finalizeLowering(MF); 12391 } 12392 12393 void SITargetLowering::computeKnownBitsForFrameIndex( 12394 const int FI, KnownBits &Known, const MachineFunction &MF) const { 12395 TargetLowering::computeKnownBitsForFrameIndex(FI, Known, MF); 12396 12397 // Set the high bits to zero based on the maximum allowed scratch size per 12398 // wave. We can't use vaddr in MUBUF instructions if we don't know the address 12399 // calculation won't overflow, so assume the sign bit is never set. 
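  // For example, if the subtarget guarantees per-wave scratch never exceeds
  // 2^N bytes, a frame index always fits in N bits and the remaining high bits
  // can be reported as zero.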
12400 Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex()); 12401 } 12402 12403 static void knownBitsForWorkitemID(const GCNSubtarget &ST, GISelKnownBits &KB, 12404 KnownBits &Known, unsigned Dim) { 12405 unsigned MaxValue = 12406 ST.getMaxWorkitemID(KB.getMachineFunction().getFunction(), Dim); 12407 Known.Zero.setHighBits(countLeadingZeros(MaxValue)); 12408 } 12409 12410 void SITargetLowering::computeKnownBitsForTargetInstr( 12411 GISelKnownBits &KB, Register R, KnownBits &Known, const APInt &DemandedElts, 12412 const MachineRegisterInfo &MRI, unsigned Depth) const { 12413 const MachineInstr *MI = MRI.getVRegDef(R); 12414 switch (MI->getOpcode()) { 12415 case AMDGPU::G_INTRINSIC: { 12416 switch (MI->getIntrinsicID()) { 12417 case Intrinsic::amdgcn_workitem_id_x: 12418 knownBitsForWorkitemID(*getSubtarget(), KB, Known, 0); 12419 break; 12420 case Intrinsic::amdgcn_workitem_id_y: 12421 knownBitsForWorkitemID(*getSubtarget(), KB, Known, 1); 12422 break; 12423 case Intrinsic::amdgcn_workitem_id_z: 12424 knownBitsForWorkitemID(*getSubtarget(), KB, Known, 2); 12425 break; 12426 case Intrinsic::amdgcn_mbcnt_lo: 12427 case Intrinsic::amdgcn_mbcnt_hi: { 12428 // These return at most the wavefront size - 1. 12429 unsigned Size = MRI.getType(R).getSizeInBits(); 12430 Known.Zero.setHighBits(Size - getSubtarget()->getWavefrontSizeLog2()); 12431 break; 12432 } 12433 case Intrinsic::amdgcn_groupstaticsize: { 12434 // We can report everything over the maximum size as 0. We can't report 12435 // based on the actual size because we don't know if it's accurate or not 12436 // at any given point. 12437 Known.Zero.setHighBits(countLeadingZeros(getSubtarget()->getLocalMemorySize())); 12438 break; 12439 } 12440 } 12441 break; 12442 } 12443 case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE: 12444 Known.Zero.setHighBits(24); 12445 break; 12446 case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT: 12447 Known.Zero.setHighBits(16); 12448 break; 12449 } 12450 } 12451 12452 Align SITargetLowering::computeKnownAlignForTargetInstr( 12453 GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI, 12454 unsigned Depth) const { 12455 const MachineInstr *MI = MRI.getVRegDef(R); 12456 switch (MI->getOpcode()) { 12457 case AMDGPU::G_INTRINSIC: 12458 case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: { 12459 // FIXME: Can this move to generic code? What about the case where the call 12460 // site specifies a lower alignment? 12461 Intrinsic::ID IID = MI->getIntrinsicID(); 12462 LLVMContext &Ctx = KB.getMachineFunction().getFunction().getContext(); 12463 AttributeList Attrs = Intrinsic::getAttributes(Ctx, IID); 12464 if (MaybeAlign RetAlign = Attrs.getRetAlignment()) 12465 return *RetAlign; 12466 return Align(1); 12467 } 12468 default: 12469 return Align(1); 12470 } 12471 } 12472 12473 Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { 12474 const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML); 12475 const Align CacheLineAlign = Align(64); 12476 12477 // Pre-GFX10 target did not benefit from loop alignment 12478 if (!ML || DisableLoopAlignment || 12479 (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) || 12480 getSubtarget()->hasInstFwdPrefetchBug()) 12481 return PrefAlign; 12482 12483 // On GFX10 I$ is 4 x 64 bytes cache lines. 12484 // By default prefetcher keeps one cache line behind and reads two ahead. 12485 // We can modify it with S_INST_PREFETCH for larger loops to have two lines 12486 // behind and one ahead. 
12487 // Therefor we can benefit from aligning loop headers if loop fits 192 bytes. 12488 // If loop fits 64 bytes it always spans no more than two cache lines and 12489 // does not need an alignment. 12490 // Else if loop is less or equal 128 bytes we do not need to modify prefetch, 12491 // Else if loop is less or equal 192 bytes we need two lines behind. 12492 12493 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 12494 const MachineBasicBlock *Header = ML->getHeader(); 12495 if (Header->getAlignment() != PrefAlign) 12496 return Header->getAlignment(); // Already processed. 12497 12498 unsigned LoopSize = 0; 12499 for (const MachineBasicBlock *MBB : ML->blocks()) { 12500 // If inner loop block is aligned assume in average half of the alignment 12501 // size to be added as nops. 12502 if (MBB != Header) 12503 LoopSize += MBB->getAlignment().value() / 2; 12504 12505 for (const MachineInstr &MI : *MBB) { 12506 LoopSize += TII->getInstSizeInBytes(MI); 12507 if (LoopSize > 192) 12508 return PrefAlign; 12509 } 12510 } 12511 12512 if (LoopSize <= 64) 12513 return PrefAlign; 12514 12515 if (LoopSize <= 128) 12516 return CacheLineAlign; 12517 12518 // If any of parent loops is surrounded by prefetch instructions do not 12519 // insert new for inner loop, which would reset parent's settings. 12520 for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) { 12521 if (MachineBasicBlock *Exit = P->getExitBlock()) { 12522 auto I = Exit->getFirstNonDebugInstr(); 12523 if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH) 12524 return CacheLineAlign; 12525 } 12526 } 12527 12528 MachineBasicBlock *Pre = ML->getLoopPreheader(); 12529 MachineBasicBlock *Exit = ML->getExitBlock(); 12530 12531 if (Pre && Exit) { 12532 auto PreTerm = Pre->getFirstTerminator(); 12533 if (PreTerm == Pre->begin() || 12534 std::prev(PreTerm)->getOpcode() != AMDGPU::S_INST_PREFETCH) 12535 BuildMI(*Pre, PreTerm, DebugLoc(), TII->get(AMDGPU::S_INST_PREFETCH)) 12536 .addImm(1); // prefetch 2 lines behind PC 12537 12538 auto ExitHead = Exit->getFirstNonDebugInstr(); 12539 if (ExitHead == Exit->end() || 12540 ExitHead->getOpcode() != AMDGPU::S_INST_PREFETCH) 12541 BuildMI(*Exit, ExitHead, DebugLoc(), TII->get(AMDGPU::S_INST_PREFETCH)) 12542 .addImm(2); // prefetch 1 line behind PC 12543 } 12544 12545 return CacheLineAlign; 12546 } 12547 12548 LLVM_ATTRIBUTE_UNUSED 12549 static bool isCopyFromRegOfInlineAsm(const SDNode *N) { 12550 assert(N->getOpcode() == ISD::CopyFromReg); 12551 do { 12552 // Follow the chain until we find an INLINEASM node. 12553 N = N->getOperand(0).getNode(); 12554 if (N->getOpcode() == ISD::INLINEASM || 12555 N->getOpcode() == ISD::INLINEASM_BR) 12556 return true; 12557 } while (N->getOpcode() == ISD::CopyFromReg); 12558 return false; 12559 } 12560 12561 bool SITargetLowering::isSDNodeSourceOfDivergence( 12562 const SDNode *N, FunctionLoweringInfo *FLI, 12563 LegacyDivergenceAnalysis *KDA) const { 12564 switch (N->getOpcode()) { 12565 case ISD::CopyFromReg: { 12566 const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1)); 12567 const MachineRegisterInfo &MRI = FLI->MF->getRegInfo(); 12568 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 12569 Register Reg = R->getReg(); 12570 12571 // FIXME: Why does this need to consider isLiveIn? 
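    // Physical registers and live-ins have no SSA value to query for
    // divergence, so classify them by register class alone: SGPRs are uniform,
    // anything else is conservatively treated as divergent.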
    if (Reg.isPhysical() || MRI.isLiveIn(Reg))
      return !TRI->isSGPRReg(MRI, Reg);

    if (const Value *V = FLI->getValueFromVirtualReg(R->getReg()))
      return KDA->isDivergent(V);

    assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
    return !TRI->isSGPRReg(MRI, Reg);
  }
  case ISD::LOAD: {
    const LoadSDNode *L = cast<LoadSDNode>(N);
    unsigned AS = L->getAddressSpace();
    // A flat load may access private memory.
    return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
  }
  case ISD::CALLSEQ_END:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
  case ISD::INTRINSIC_W_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
  case AMDGPUISD::ATOMIC_CMP_SWAP:
  case AMDGPUISD::ATOMIC_INC:
  case AMDGPUISD::ATOMIC_DEC:
  case AMDGPUISD::ATOMIC_LOAD_FMIN:
  case AMDGPUISD::ATOMIC_LOAD_FMAX:
  case AMDGPUISD::BUFFER_ATOMIC_SWAP:
  case AMDGPUISD::BUFFER_ATOMIC_ADD:
  case AMDGPUISD::BUFFER_ATOMIC_SUB:
  case AMDGPUISD::BUFFER_ATOMIC_SMIN:
  case AMDGPUISD::BUFFER_ATOMIC_UMIN:
  case AMDGPUISD::BUFFER_ATOMIC_SMAX:
  case AMDGPUISD::BUFFER_ATOMIC_UMAX:
  case AMDGPUISD::BUFFER_ATOMIC_AND:
  case AMDGPUISD::BUFFER_ATOMIC_OR:
  case AMDGPUISD::BUFFER_ATOMIC_XOR:
  case AMDGPUISD::BUFFER_ATOMIC_INC:
  case AMDGPUISD::BUFFER_ATOMIC_DEC:
  case AMDGPUISD::BUFFER_ATOMIC_CMPSWAP:
  case AMDGPUISD::BUFFER_ATOMIC_CSUB:
  case AMDGPUISD::BUFFER_ATOMIC_FADD:
  case AMDGPUISD::BUFFER_ATOMIC_FMIN:
  case AMDGPUISD::BUFFER_ATOMIC_FMAX:
    // Target-specific read-modify-write atomics are sources of divergence.
    return true;
  default:
    if (auto *A = dyn_cast<AtomicSDNode>(N)) {
      // Generic read-modify-write atomics are sources of divergence.
      return A->readMem() && A->writeMem();
    }
    return false;
  }
}

bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG,
                                               EVT VT) const {
  switch (VT.getScalarType().getSimpleVT().SimpleTy) {
  case MVT::f32:
    return hasFP32Denormals(DAG.getMachineFunction());
  case MVT::f64:
  case MVT::f16:
    return hasFP64FP16Denormals(DAG.getMachineFunction());
  default:
    return false;
  }
}

bool SITargetLowering::denormalsEnabledForType(LLT Ty,
                                               MachineFunction &MF) const {
  switch (Ty.getScalarSizeInBits()) {
  case 32:
    return hasFP32Denormals(MF);
  case 64:
  case 16:
    return hasFP64FP16Denormals(MF);
  default:
    return false;
  }
}

bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
                                                    const SelectionDAG &DAG,
                                                    bool SNaN,
                                                    unsigned Depth) const {
  if (Op.getOpcode() == AMDGPUISD::CLAMP) {
    const MachineFunction &MF = DAG.getMachineFunction();
    const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

    if (Info->getMode().DX10Clamp)
      return true; // Clamped to 0.
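    // Without DX10 clamp the clamp may pass a NaN input through, so the
    // result is only known non-NaN if the operand is.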
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }

  return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
                                                            SNaN, Depth);
}

// Global FP atomic instructions have a hardcoded FP mode and do not support
// FP32 denormals; only v2f16 denormals are supported.
static bool fpModeMatchesGlobalFPAtomicMode(const AtomicRMWInst *RMW) {
  const fltSemantics &Flt = RMW->getType()->getScalarType()->getFltSemantics();
  auto DenormMode = RMW->getParent()->getParent()->getDenormalMode(Flt);
  if (&Flt == &APFloat::IEEEsingle())
    return DenormMode == DenormalMode::getPreserveSign();
  return DenormMode == DenormalMode::getIEEE();
}

TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  unsigned AS = RMW->getPointerAddressSpace();
  if (AS == AMDGPUAS::PRIVATE_ADDRESS)
    return AtomicExpansionKind::NotAtomic;

  auto ReportUnsafeHWInst = [&](TargetLowering::AtomicExpansionKind Kind) {
    OptimizationRemarkEmitter ORE(RMW->getFunction());
    LLVMContext &Ctx = RMW->getFunction()->getContext();
    SmallVector<StringRef> SSNs;
    Ctx.getSyncScopeNames(SSNs);
    auto MemScope = SSNs[RMW->getSyncScopeID()].empty()
                        ? "system"
                        : SSNs[RMW->getSyncScopeID()];
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Passed", RMW)
             << "Hardware instruction generated for atomic "
             << RMW->getOperationName(RMW->getOperation())
             << " operation at memory scope " << MemScope
             << " due to an unsafe request.";
    });
    return Kind;
  };

  switch (RMW->getOperation()) {
  case AtomicRMWInst::FAdd: {
    Type *Ty = RMW->getType();

    // We don't have a way to support 16-bit atomics now, so just leave them
    // as-is.
    if (Ty->isHalfTy())
      return AtomicExpansionKind::None;

    if (!Ty->isFloatTy() && (!Subtarget->hasGFX90AInsts() || !Ty->isDoubleTy()))
      return AtomicExpansionKind::CmpXChg;

    if ((AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) &&
        Subtarget->hasAtomicFaddNoRtnInsts()) {
      if (Subtarget->hasGFX940Insts())
        return AtomicExpansionKind::None;

      // The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe
      // floating point atomic instructions. May generate more efficient code,
      // but may not respect rounding and denormal modes, and may give
      // incorrect results for certain memory destinations.
      if (RMW->getFunction()
              ->getFnAttribute("amdgpu-unsafe-fp-atomics")
              .getValueAsString() != "true")
        return AtomicExpansionKind::CmpXChg;

      if (Subtarget->hasGFX90AInsts()) {
        if (Ty->isFloatTy() && AS == AMDGPUAS::FLAT_ADDRESS)
          return AtomicExpansionKind::CmpXChg;

        auto SSID = RMW->getSyncScopeID();
        if (SSID == SyncScope::System ||
            SSID == RMW->getContext().getOrInsertSyncScopeID("one-as"))
          return AtomicExpansionKind::CmpXChg;

        return ReportUnsafeHWInst(AtomicExpansionKind::None);
      }

      if (AS == AMDGPUAS::FLAT_ADDRESS)
        return AtomicExpansionKind::CmpXChg;

      return RMW->use_empty() ? ReportUnsafeHWInst(AtomicExpansionKind::None)
                              : AtomicExpansionKind::CmpXChg;
    }

    // DS FP atomics do respect the denormal mode, but the rounding mode is
    // fixed to round-to-nearest-even.
    // The only exception is DS_ADD_F64, which never flushes regardless of
    // mode.
    if (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomicAdd()) {
      if (!Ty->isDoubleTy())
        return AtomicExpansionKind::None;

      if (fpModeMatchesGlobalFPAtomicMode(RMW))
        return AtomicExpansionKind::None;

      return RMW->getFunction()
                     ->getFnAttribute("amdgpu-unsafe-fp-atomics")
                     .getValueAsString() == "true"
                 ? ReportUnsafeHWInst(AtomicExpansionKind::None)
                 : AtomicExpansionKind::CmpXChg;
    }

    return AtomicExpansionKind::CmpXChg;
  }
  default:
    break;
  }

  return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
}

TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  return LI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
             ? AtomicExpansionKind::NotAtomic
             : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  return SI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
             ? AtomicExpansionKind::NotAtomic
             : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const {
  return CmpX->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
             ? AtomicExpansionKind::NotAtomic
             : AtomicExpansionKind::None;
}

const TargetRegisterClass *
SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
  const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false);
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (RC == &AMDGPU::VReg_1RegClass && !isDivergent)
    return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass
                                               : &AMDGPU::SReg_32RegClass;
  if (!TRI->isSGPRClass(RC) && !isDivergent)
    return TRI->getEquivalentSGPRClass(RC);
  else if (TRI->isSGPRClass(RC) && isDivergent)
    return TRI->getEquivalentVGPRClass(RC);

  return RC;
}

// FIXME: This is a workaround for DivergenceAnalysis not understanding always
// uniform values (as produced by the mask results of control flow intrinsics)
// used outside of divergent blocks. The phi users need to also be treated as
// always uniform.
static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited,
                      unsigned WaveSize) {
  // FIXME: We assume we never cast the mask results of a control flow
  // intrinsic.
  // Early exit, as a compile-time hack, if the type cannot be consistent with
  // such a mask.
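  // Only a wave-sized integer (i32 for wave32, i64 for wave64) can carry such
  // a mask.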
  IntegerType *IT = dyn_cast<IntegerType>(V->getType());
  if (!IT || IT->getBitWidth() != WaveSize)
    return false;

  if (!isa<Instruction>(V))
    return false;
  if (!Visited.insert(V).second)
    return false;
  bool Result = false;
  for (auto U : V->users()) {
    if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) {
      if (V == U->getOperand(1)) {
        switch (Intrinsic->getIntrinsicID()) {
        default:
          Result = false;
          break;
        case Intrinsic::amdgcn_if_break:
        case Intrinsic::amdgcn_if:
        case Intrinsic::amdgcn_else:
          Result = true;
          break;
        }
      }
      if (V == U->getOperand(0)) {
        switch (Intrinsic->getIntrinsicID()) {
        default:
          Result = false;
          break;
        case Intrinsic::amdgcn_end_cf:
        case Intrinsic::amdgcn_loop:
          Result = true;
          break;
        }
      }
    } else {
      Result = hasCFUser(U, Visited, WaveSize);
    }
    if (Result)
      break;
  }
  return Result;
}

bool SITargetLowering::requiresUniformRegister(MachineFunction &MF,
                                               const Value *V) const {
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (CI->isInlineAsm()) {
      // FIXME: This cannot give a correct answer. This should only trigger in
      // the case where inline asm returns mixed SGPR and VGPR results, used
      // outside the defining block. We don't have a specific result to
      // consider, so this assumes if any value is SGPR, the overall register
      // also needs to be SGPR.
      const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo();
      TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints(
          MF.getDataLayout(), Subtarget->getRegisterInfo(), *CI);
      for (auto &TC : TargetConstraints) {
        if (TC.Type == InlineAsm::isOutput) {
          ComputeConstraintToUse(TC, SDValue());
          const TargetRegisterClass *RC = getRegForInlineAsmConstraint(
              SIRI, TC.ConstraintCode, TC.ConstraintVT).second;
          if (RC && SIRI->isSGPRClass(RC))
            return true;
        }
      }
    }
  }
  SmallPtrSet<const Value *, 16> Visited;
  return hasCFUser(V, Visited, Subtarget->getWavefrontSize());
}

std::pair<InstructionCost, MVT>
SITargetLowering::getTypeLegalizationCost(const DataLayout &DL,
                                          Type *Ty) const {
  std::pair<InstructionCost, MVT> Cost =
      TargetLoweringBase::getTypeLegalizationCost(DL, Ty);
  auto Size = DL.getTypeSizeInBits(Ty);
  // The largest load or store handles 8 dwords for the scalar unit and 4 for
  // the vector ALU. Assume anything above 8 dwords is expensive even if legal.
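  // 8 dwords = 256 bits; above that, add roughly one unit of cost per 256
  // bits of the type.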
  if (Size <= 256)
    return Cost;

  Cost.first += (Size + 255) / 256;
  return Cost;
}

bool SITargetLowering::hasMemSDNodeUser(SDNode *N) const {
  SDNode::use_iterator I = N->use_begin(), E = N->use_end();
  for (; I != E; ++I) {
    if (MemSDNode *M = dyn_cast<MemSDNode>(*I)) {
      if (getBasePtrIndex(M) == I.getOperandNo())
        return true;
    }
  }
  return false;
}

bool SITargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                                           SDValue N1) const {
  if (!N0.hasOneUse())
    return false;
  // Prefer to keep N0 uniform if possible.
  if (N0->isDivergent() || !N1->isDivergent())
    return true;
  // Check if we have a good chance to form a memory access pattern with the
  // base and offset.
  return (DAG.isBaseWithConstantOffset(N0) &&
          hasMemSDNodeUser(*N0->use_begin()));
}

MachineMemOperand::Flags
SITargetLowering::getTargetMMOFlags(const Instruction &I) const {
  // Propagate metadata set by AMDGPUAnnotateUniformValues to the MMO of a
  // load.
  if (I.getMetadata("amdgpu.noclobber"))
    return MONoClobber;
  return MachineMemOperand::MONone;
}