Lines matching refs:RI (the SIRegisterInfo member) in llvm/lib/Target/AMDGPU/SIInstrInfo.cpp; a simplified sketch of the most common usage pattern follows the listing.

66     RI(ST), ST(ST) {  in SIInstrInfo()
506 const SIRegisterInfo &RI = TII.getRegisterInfo(); in indirectCopyToAGPR() local
514 if (!Def->definesRegister(SrcReg, &RI)) in indirectCopyToAGPR()
527 if (I->modifiesRegister(DefOp.getReg(), &RI)) in indirectCopyToAGPR()
555 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass, in indirectCopyToAGPR()
572 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs) in indirectCopyToAGPR()
606 const SIRegisterInfo &RI = TII.getRegisterInfo(); in expandSGPRCopy() local
607 ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4); in expandSGPRCopy()
613 Register Reg = RI.getSubReg(DestReg, SubIdx); in expandSGPRCopy()
617 Register Src = RI.getSubReg(SrcReg, SubIdx); in expandSGPRCopy()
622 unsigned Channel = RI.getChannelFromSubReg(SubIdx); in expandSGPRCopy()
623 SubIdx = RI.getSubRegFromChannel(Channel, 2); in expandSGPRCopy()
628 LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx)) in expandSGPRCopy()
629 .addReg(RI.getSubReg(SrcReg, SubIdx)) in expandSGPRCopy()
647 LastMI->addRegisterKilled(SrcReg, &RI); in expandSGPRCopy()
654 const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg); in copyPhysReg()
659 ((RI.getRegSizeInBits(*RC) == 16) ^ in copyPhysReg()
660 (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) { in copyPhysReg()
661 MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg; in copyPhysReg()
662 MCRegister Super = RI.get32BitRegister(RegToFix); in copyPhysReg()
663 assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix); in copyPhysReg()
672 RC = RI.getPhysRegClass(DestReg); in copyPhysReg()
794 const unsigned Size = RI.getRegSizeInBits(*RC); in copyPhysReg()
811 MCRegister NewDestReg = RI.get32BitRegister(DestReg); in copyPhysReg()
812 MCRegister NewSrcReg = RI.get32BitRegister(SrcReg); in copyPhysReg()
861 const TargetRegisterClass *SrcRC = RI.getPhysRegClass(SrcReg); in copyPhysReg()
862 if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) { in copyPhysReg()
879 const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg); in copyPhysReg()
880 if (RI.isSGPRClass(RC)) { in copyPhysReg()
881 if (!RI.isSGPRClass(SrcRC)) { in copyPhysReg()
891 if (RI.hasAGPRs(RC)) { in copyPhysReg()
892 Opcode = (RI.hasVGPRs(SrcRC)) ? in copyPhysReg()
894 } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(SrcRC)) { in copyPhysReg()
896 } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) && in copyPhysReg()
897 (RI.isProperlyAlignedRC(*RC) && in copyPhysReg()
898 (SrcRC == RC || RI.isSGPRClass(SrcRC)))) { in copyPhysReg()
915 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize); in copyPhysReg()
919 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg); in copyPhysReg()
933 indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx), in copyPhysReg()
934 RI.getSubReg(SrcReg, SubIdx), UseKill, *RS, in copyPhysReg()
937 Register DstSubReg = RI.getSubReg(DestReg, SubIdx); in copyPhysReg()
938 Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx); in copyPhysReg()
955 BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx)) in copyPhysReg()
956 .addReg(RI.getSubReg(SrcReg, SubIdx)); in copyPhysReg()
1019 if (RI.isSGPRClass(RegClass)) { in materializeImmediate()
1020 if (RI.getRegSizeInBits(*RegClass) > 32) { in materializeImmediate()
1029 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize); in materializeImmediate()
1034 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx])); in materializeImmediate()
1052 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); in insertVectorSelect()
1127 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); in insertVectorSelect()
1145 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); in insertVectorSelect()
1175 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); in insertEQ()
1188 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); in insertNE()
1198 if (RI.hasAGPRs(DstRC)) in getMovOpcode()
1200 if (RI.getRegSizeInBits(*DstRC) == 32) { in getMovOpcode()
1201 return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; in getMovOpcode()
1202 } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) { in getMovOpcode()
1204 } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) { in getMovOpcode()
1422 if (RI.isSGPRClass(RC)) { in storeRegToStackSlot()
1445 if (RI.spillSGPRToVGPR()) in storeRegToStackSlot()
1450 unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize) in storeRegToStackSlot()
1555 if (RI.isSGPRClass(RC)) { in loadRegFromStackSlot()
1569 if (RI.spillSGPRToVGPR()) in loadRegFromStackSlot()
1579 unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize) in loadRegFromStackSlot()
1697 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); in expandPostRAPseudo()
1698 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); in expandPostRAPseudo()
1729 !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) { in expandPostRAPseudo()
1742 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) in expandPostRAPseudo()
1745 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) in expandPostRAPseudo()
1806 if (RI.hasVGPRs(EltRC)) { in expandPostRAPseudo()
1809 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64 in expandPostRAPseudo()
1821 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) in expandPostRAPseudo()
1855 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) in expandPostRAPseudo()
1894 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) in expandPostRAPseudo()
1908 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); in expandPostRAPseudo()
1909 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); in expandPostRAPseudo()
1977 MovDPP.addDef(RI.getSubReg(Dst, Sub)); in expandMovDPP64()
1995 MovDPP.addReg(RI.getSubReg(Src, Sub)); in expandMovDPP64()
2509 return RI.hasVGPRs(RC) && NumInsts <= 6; in canInsertSelect()
2527 return RI.isSGPRClass(RC); in canInsertSelect()
2546 unsigned DstSize = RI.getRegSizeInBits(*DstRC); in insertSelect()
2725 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg); in FoldImmediate()
2732 if (RI.isAGPR(*MRI, DstReg)) { in FoldImmediate()
2748 DstReg = RI.get32BitRegister(DstReg); in FoldImmediate()
2788 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) in FoldImmediate()
2791 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) in FoldImmediate()
2855 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) || in FoldImmediate()
2858 RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))) in FoldImmediate()
2872 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || in FoldImmediate()
2874 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) in FoldImmediate()
2952 Dummy0, &RI) || in checkInstOffsetsDoNotOverlap()
2954 Dummy1, &RI)) in checkInstOffsetsDoNotOverlap()
3099 !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) { in convertToThreeAddress()
3200 return MI.modifiesRegister(AMDGPU::EXEC, &RI) || in isSchedulingBoundary()
3278 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) in mayReadEXEC()
3282 return MI.readsRegister(AMDGPU::EXEC, &RI); in mayReadEXEC()
3293 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); in mayReadEXEC()
3446 return RI.opCanUseInlineConstant(OpInfo.OperandType); in isImmOperandLegal()
3449 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) in isImmOperandLegal()
3511 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) in canShrink()
3521 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || in canShrink()
3532 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || in canShrink()
3626 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); in usesConstantBus()
3720 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); in verifyInstruction()
3806 const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg); in verifyInstruction()
3807 const bool IsVGPR = RI.hasVGPRs(RC); in verifyInstruction()
3808 const bool IsAGPR = !IsVGPR && RI.hasAGPRs(RC); in verifyInstruction()
3811 RI.getSubRegClass(RC, MO.getSubReg()); in verifyInstruction()
3812 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg()); in verifyInstruction()
3818 if (!RC || !RI.isProperlyAlignedRC(*RC)) { in verifyInstruction()
3828 const TargetRegisterClass *RC = RI.getRegClass(RegClass); in verifyInstruction()
3854 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { in verifyInstruction()
3953 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; in verifyInstruction()
4012 return !RI.regsOverlap(SGPRUsed, SGPR); in verifyInstruction()
4162 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { in verifyInstruction()
4325 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) { in verifyInstruction()
4331 (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) { in verifyInstruction()
4337 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) || in verifyInstruction()
4338 (Data && RI.isAGPR(MRI, Data->getReg())) || in verifyInstruction()
4339 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) { in verifyInstruction()
4364 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? in getVALUOp()
4481 return RI.getRegClass(RegClass); in getRegClass()
4494 return RI.getPhysRegClass(Reg); in getOpRegClass()
4499 return RI.getRegClass(RCID); in getOpRegClass()
4508 const TargetRegisterClass *RC = RI.getRegClass(RCID); in legalizeOpWithMove()
4509 unsigned Size = RI.getRegSizeInBits(*RC); in legalizeOpWithMove()
4513 else if (RI.isSGPRClass(RC)) in legalizeOpWithMove()
4516 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); in legalizeOpWithMove()
4517 const TargetRegisterClass *VRC64 = RI.getVGPR64Class(); in legalizeOpWithMove()
4518 if (RI.getCommonSubClass(VRC64, VRC)) in legalizeOpWithMove()
4598 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); in isLegalRegOperand()
4606 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); in isLegalRegOperand()
4610 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); in isLegalRegOperand()
4635 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; in isOperandLegal()
4678 bool IsAGPR = RI.isAGPR(MRI, MO->getReg()); in isOperandLegal()
4692 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR) in isOperandLegal()
4696 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR) in isOperandLegal()
4702 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR) in isOperandLegal()
4707 RI.isSGPRReg(MRI, MO->getReg())) in isOperandLegal()
4738 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || in legalizeOperandsVOP2()
4747 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { in legalizeOperandsVOP2()
4753 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { in legalizeOperandsVOP2()
4764 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) in legalizeOperandsVOP2()
4767 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg())) in legalizeOperandsVOP2()
4779 RI.isVGPR(MRI, Src1.getReg())) { in legalizeOperandsVOP2()
4851 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { in legalizeOperandsVOP3()
4857 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { in legalizeOperandsVOP3()
4897 if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) && in legalizeOperandsVOP3()
4903 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) in legalizeOperandsVOP3()
4925 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); in readlaneVGPRToSGPR()
4927 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; in readlaneVGPRToSGPR()
4929 if (RI.hasAGPRs(VRC)) { in readlaneVGPRToSGPR()
4930 VRC = RI.getEquivalentVGPRClass(VRC); in readlaneVGPRToSGPR()
4950 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); in readlaneVGPRToSGPR()
4959 MIB.addImm(RI.getSubRegFromChannel(i)); in readlaneVGPRToSGPR()
4972 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { in legalizeOperandsSMRD()
4977 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) { in legalizeOperandsSMRD()
4999 if (RI.isSGPRReg(MRI, SAddr.getReg())) in moveFlatAddrToVGPR()
5072 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg()))) in legalizeOperandsFLAT()
5091 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( in legalizeGenericOperand()
5092 RI.getRegClassForReg(MRI, OpReg), OpSubReg); in legalizeGenericOperand()
5120 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && in legalizeGenericOperand()
5402 if (RI.hasVectorRegisters(OpRC)) { in legalizeOperands()
5412 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) { in legalizeOperands()
5418 VRC = RI.hasAGPRs(getOpRegClass(MI, 0)) in legalizeOperands()
5419 ? RI.getEquivalentAGPRClass(SRC) in legalizeOperands()
5420 : RI.getEquivalentVGPRClass(SRC); in legalizeOperands()
5422 VRC = RI.hasAGPRs(getOpRegClass(MI, 0)) in legalizeOperands()
5423 ? RI.getEquivalentAGPRClass(VRC) in legalizeOperands()
5424 : RI.getEquivalentVGPRClass(VRC); in legalizeOperands()
5453 if (RI.hasVGPRs(DstRC)) { in legalizeOperands()
5463 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); in legalizeOperands()
5493 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) in legalizeOperands()
5506 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) in legalizeOperands()
5510 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) in legalizeOperands()
5519 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) { in legalizeOperands()
5550 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()), in legalizeOperands()
5551 RI.getRegClass(RsrcRC))) { in legalizeOperands()
5580 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); in legalizeOperands()
5890 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); in moveToVALU()
5901 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass( in moveToVALU()
5930 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg())); in moveToVALU()
6028 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { in moveToVALU()
6120 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != in lowerSelect()
6228 RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); in lowerScalarXnor()
6230 RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); in lowerScalarXnor()
6340 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); in splitScalar64BitUnaryOp()
6346 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); in splitScalar64BitUnaryOp()
6347 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); in splitScalar64BitUnaryOp()
6387 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); in splitScalar64BitAddSub()
6404 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); in splitScalar64BitAddSub()
6405 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); in splitScalar64BitAddSub()
6470 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); in splitScalar64BitBinaryOp()
6475 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); in splitScalar64BitBinaryOp()
6487 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); in splitScalar64BitBinaryOp()
6488 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); in splitScalar64BitBinaryOp()
6536 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) { in splitScalar64BitXnor()
6577 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); in splitScalar64BitBCNT()
6681 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { in addUsersToMoveToVALUWorklist()
6770 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) { in addSCCDefUsersToVALUWorklist()
6778 User.getOperand(4).setReg(RI.getVCC()); in addSCCDefUsersToVALUWorklist()
6781 User.getOperand(5).setReg(RI.getVCC()); in addSCCDefUsersToVALUWorklist()
6802 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1) in addSCCDefUsersToVALUWorklist()
6811 .addReg(RI.getVCC()); in addSCCDefUsersToVALUWorklist()
6832 if (MI.modifiesRegister(AMDGPU::VCC, &RI)) in addSCCDefsToVALUWorklist()
6834 if (MI.definesRegister(AMDGPU::SCC, &RI)) { in addSCCDefsToVALUWorklist()
6858 if (RI.hasAGPRs(SrcRC)) { in getDestEquivalentVGPRClass()
6859 if (RI.hasAGPRs(NewDstRC)) in getDestEquivalentVGPRClass()
6866 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC); in getDestEquivalentVGPRClass()
6869 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); in getDestEquivalentVGPRClass()
6875 if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass) in getDestEquivalentVGPRClass()
6878 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); in getDestEquivalentVGPRClass()
6922 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); in findUsedSGPR()
6923 bool IsRequiredSGPR = RI.isSGPRClass(OpRC); in findUsedSGPR()
6930 if (RI.isSGPRClass(RegRC)) in findUsedSGPR()
7189 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); in convertNonUniformIfRegion()
7216 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); in convertNonUniformLoopRegion()
7217 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); in convertNonUniformLoopRegion()
7227 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); in convertNonUniformLoopRegion()
7299 MI.modifiesRegister(AMDGPU::EXEC, &RI); in isBasicBlockPrologue()
7311 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); in getAddNoCarry()
7312 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); in getAddNoCarry()
7328 ? Register(RI.getVCC()) in getAddNoCarry()
7329 : RS.scavengeRegister(RI.getBoolRC(), I, 0, false); in getAddNoCarry()
7380 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); in isBufferSMRD()
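The dominant pattern in these references is querying RI for register-class properties and sub-registers before emitting one instruction per 32-bit channel (see the expandSGPRCopy, copyPhysReg, and materializeImmediate entries above). Below is a minimal, standalone sketch of that splitting idea. It deliberately mocks the register-info queries instead of using the real SIRegisterInfo/BuildMI API, so MockRegisterInfo, expandWideCopy, and the fake register encoding are illustrative assumptions for this sketch, not LLVM code.

    // Illustrative mock only: the real logic lives in SIInstrInfo::copyPhysReg
    // and expandSGPRCopy and uses SIRegisterInfo + BuildMI, not these stand-ins.
    #include <cstdio>
    #include <vector>

    // Stand-in for the SIRegisterInfo queries seen in the listing
    // (getRegSplitParts, getSubReg). Registers are plain integers here.
    struct MockRegisterInfo {
      // Split a wide register into 32-bit sub-register indices (hypothetical).
      std::vector<int> getRegSplitParts(unsigned RegSizeInBits) const {
        std::vector<int> Parts;
        for (unsigned I = 0; I < RegSizeInBits / 32; ++I)
          Parts.push_back(static_cast<int>(I));
        return Parts;
      }
      // Map (super-register, sub-index) to a concrete 32-bit register number.
      unsigned getSubReg(unsigned SuperReg, int SubIdx) const {
        return SuperReg * 100 + static_cast<unsigned>(SubIdx); // fake encoding
      }
    };

    // Sketch of the copy-expansion loop: one 32-bit move per sub-register,
    // mirroring the RI.getRegSplitParts / RI.getSubReg calls in the listing.
    void expandWideCopy(const MockRegisterInfo &RI, unsigned DestReg,
                        unsigned SrcReg, unsigned RegSizeInBits) {
      for (int SubIdx : RI.getRegSplitParts(RegSizeInBits))
        std::printf("S_MOV_B32 r%u <- r%u\n", RI.getSubReg(DestReg, SubIdx),
                    RI.getSubReg(SrcReg, SubIdx));
    }

    int main() {
      MockRegisterInfo RI;
      // A 128-bit copy expands into four 32-bit moves.
      expandWideCopy(RI, /*DestReg=*/7, /*SrcReg=*/3, /*RegSizeInBits=*/128);
      return 0;
    }

In the real backend the per-lane opcode also depends on the destination register class (for example S_MOV_B32 for SGPRs versus V_MOV_B32_e32 for VGPRs), which is what the getMovOpcode and copyPhysReg references above reflect.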