//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsXCore.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "xcore-lower"

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch ((XCoreISD::NodeType)Opcode)
  {
  case XCoreISD::FIRST_NUMBER      : break;
  case XCoreISD::BL                : return "XCoreISD::BL";
  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
  case XCoreISD::LDWSP             : return "XCoreISD::LDWSP";
  case XCoreISD::STWSP             : return "XCoreISD::STWSP";
  case XCoreISD::RETSP             : return "XCoreISD::RETSP";
  case XCoreISD::LADD              : return "XCoreISD::LADD";
  case XCoreISD::LSUB              : return "XCoreISD::LSUB";
  case XCoreISD::LMUL              : return "XCoreISD::LMUL";
  case XCoreISD::MACCU             : return "XCoreISD::MACCU";
  case XCoreISD::MACCS             : return "XCoreISD::MACCS";
  case XCoreISD::CRC8              : return "XCoreISD::CRC8";
  case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
  case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
  case XCoreISD::EH_RETURN         : return "XCoreISD::EH_RETURN";
  case XCoreISD::MEMBARRIER        : return "XCoreISD::MEMBARRIER";
  }
  return nullptr;
}

XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
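
  // The operation actions below follow the standard LLVM legalization
  // scheme: Legal nodes are matched directly during instruction selection,
  // Expand nodes are rewritten by the legalizer in terms of other
  // operations, and Custom nodes are routed through LowerOperation() below.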

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
  }

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  // Atomic operations
  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always Sequentially Consistent, an ATOMIC_FENCE becomes a no-op.
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
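  // (At present that is just xcore_crc8, handled in LowerINTRINSIC_WO_CHAIN.)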
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  setMinFunctionAlignment(Align(2));
  setPrefFunctionAlignment(Align(4));
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_LOAD:        return LowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of a node with an illegal result
/// type with new values built out of custom code.
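/// For XCore this currently only covers i64 ADD and SUB, which ExpandADDSUB
/// splits into 32-bit LADD/LSUB chains.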
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
                                                     const GlobalValue *GV,
                                                     SelectionDAG &DAG) const {
  // FIXME there is no actual debug info here
  SDLoc dl(GA);

  if (GV->getValueType()->isFunctionTy())
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);

  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
      (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);

  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
    return true;

  Type *ObjType = GV->getValueType();
  if (!ObjType->isSized())
    return false;

  auto &DL = GV->getParent()->getDataLayout();
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  return ObjSize < CodeModelLargeSize && ObjSize != 0;
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
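    // The object is too big for a DP/CP-relative access, so materialize
    // GV + Offset as a constant-pool entry (an i8 GEP constant expression)
    // and load the address from it at runtime.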
    Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
    Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue *>(GV), Ty);
    Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(
        Type::getInt8Ty(*DAG.getContext()), GA, Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo());
  }
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlign(), CP->getOffset());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
                                    CP->getOffset());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, dl, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
    const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
    SelectionDAG &DAG) const {
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
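  // Worked example: for Offset = 6, LowOffset = 4 and HighOffset = 8, so the
  // result is (load(Base + 4) >> 16) | (load(Base + 8) << 16).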
  int32_t HighOffset = alignTo(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, DL, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, DL, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);

  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  KnownBits Known = DAG.computeKnownBits(Value);
  return Known.countMinTrailingZeros() >= 2;
}

SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LLVMContext &Context = *DAG.getContext();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");

  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
                                     LD->getMemoryVT(), *LD->getMemOperand()))
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        GV->getPointerAlignment(DAG.getDataLayout()) >= 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
                                 LD->getPointerInfo(), MVT::i16, Align(2),
                                 LD->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, DL, MVT::i32));
    SDValue High =
        DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
                       LD->getPointerInfo().getWithOffset(2), MVT::i16,
                       Align(2), LD->getMemOperand()->getFlags());
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, DL, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
      CallingConv::C, IntPtrTy,
      DAG.getExternalSymbol("__misaligned_load",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}

SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext &Context = *DAG.getContext();
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");

  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
                                     ST->getMemoryVT(), *ST->getMemOperand()))
    return SDValue();

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, dl, MVT::i32));
    SDValue StoreLow =
        DAG.getTruncStore(Chain, dl, Low, BasePtr, ST->getPointerInfo(),
                          MVT::i16, Align(2), ST->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, dl, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(
        Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
        MVT::i16, Align(2), ST->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
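  // The helper takes (intptr_t Addr, intptr_t Value) and returns void,
  // matching the two IntPtrTy arguments built below.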
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      CallingConv::C, Type::getVoidTy(Context),
      DAG.getExternalSymbol("__misaligned_store",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
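/// For example, add(add(mul(x,y),a),b) yields Mul0 = x, Mul1 = y,
/// Addend0 = a and Addend1 = b.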
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, dl, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, dl, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
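    // A single MACCS covers the whole computation:
    // {Hi,Lo} = AddendH:AddendL + sext(LL) * sext(RL).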
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD)
    if (SDValue Result = TryExpandADDWithMul(N, DAG))
      return Result;

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // While LLVM does not support aggregate varargs, we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
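  // The lowering is the usual va_arg sequence: load the current va_list
  // pointer, advance it by the argument's size, store it back, then load the
  // argument itself from the old pointer.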
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vararg.
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
                                                      dl));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo());
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // This node represents llvm.frameaddress on the DAG.
  // It takes one operand, the index of the frame address to return.
  // An index of zero corresponds to the current function's frame address.
  // An index of one to the parent's frame address, and so on.
  // Depths > 0 are not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This node represents llvm.returnaddress on the DAG.
  // It takes one operand, the index of the return address to return.
  // An index of zero corresponds to the current function's return address.
  // An index of one to the parent's return address, and so on.
  // Depths > 0 are not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  int FI = XFI->createLRSpillSlot(MF);
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                     DAG.getEntryNode(), FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
}

SDValue XCoreTargetLowering::
LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
  // This node represents the offset from the frame pointer to the first
  // on-stack argument. This is needed for correct stack adjustment during
  // unwind. However, we don't know the offset until after the frame has
  // been finalised. This is done during the XCoreFTAOElim pass.
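  // Until then we simply emit the target node; the pass later replaces it
  // with the computed offset.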
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents the 'eh_return' GCC DWARF builtin, which is used to
  // return from an exception. The general meaning is: adjust the stack by
  // OFFSET and pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     RegInfo->getFrameRegister(MF), MVT::i32);
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves 2 caller-saved registers, R2 & R3, for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, dl, MVT::i32));
  OutChains[1] =
      DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 4));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, dl, MVT::i32));
  OutChains[2] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 8));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, dl, MVT::i32));
  OutChains[3] =
      DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, dl, MVT::i32));
  OutChains[4] =
      DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
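
// Lower intrinsics whose DAG nodes produce multiple results. At present this
// is just xcore_crc8, whose CRC8 node yields both the updated CRC and the
// residual data word.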
SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    SDValue Data =
        DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    SDValue Crc(Data.getNode(), 1);
    SDValue Results[] = { Crc, Data };
    return DAG.getMergeValues(Results, DL);
  }
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
  assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
          N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic load must be aligned");
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                       N->getChain(), N->getBasePtr(), N->getPointerInfo(),
                       N->getAlignment(), N->getMemOperand()->getFlags(),
                       N->getAAInfo(), N->getRanges());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic load must be aligned");
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
  assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
          N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic store must be aligned");
    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
                        N->getPointerInfo(), N->getAlignment(),
                        N->getMemOperand()->getFlags(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic store must be aligned");
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                             N->getAlignment(), N->getMemOperand()->getFlags(),
                             N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                             N->getAlignment(), N->getMemOperand()->getFlags(),
                             N->getAAInfo());
  return SDValue();
}

MachineMemOperand::Flags
XCoreTargetLowering::getTargetMMOFlags(const Instruction &I) const {
  // Because of how we convert atomic_load and atomic_store to normal loads and
  // stores in the DAG, we need to ensure that the MMOs are marked volatile
  // since DAGCombine hasn't been updated to account for atomic, but
  // non-volatile loads. (See D57601.)
  if (auto *SI = dyn_cast<StoreInst>(&I))
    if (SI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *LI = dyn_cast<LoadInst>(&I))
    if (LI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  return MachineMemOperand::MONone;
}

//===----------------------------------------------------------------------===//
//  Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // The XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C is implemented.
  switch (CallConv)
  {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                          Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers / memory locations.
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               const SDLoc &dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
                                 InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    } else {
      assert(VA.isMemLoc());
      ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
                                             InVals.size()));
      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
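  // Stack results are read back SP-relative with LDWSP, which takes a word
  // offset, hence the division by 4 below.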
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
    SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
    SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
    InVals[index] = load;
    MemOpChains.push_back(load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame); CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue XCoreTargetLowering::LowerCCCCallTo(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, Align(4));

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, dl,
                                                        MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //                 = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}

/// XCore formal arguments implementation
SDValue XCoreTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv)
  {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    return LowerCCCArguments(Chain, CallConv, isVarArg,
                             Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue XCoreTargetLowering::LowerCCCArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  if (!isVarArg)
    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getEVTString() << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i32:
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // Sanity check.
      assert(VA.isMemLoc());
      // Load the argument to a virtual register.
      unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString() << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI.CreateFixedObject(ObjSize,
                                     LRSaveSize + VA.getLocMemOffset(),
                                     true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (isVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address.
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot.
        int FI = MFI.CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg.
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack.
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
          MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                                true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      Align Alignment =
          std::max(Align(StackSlotSize), ArgDI->Flags.getNonZeroByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(
          Chain, dl, FIN, ArgDI->SDV, DAG.getConstant(Size, dl, MVT::i32),
          Alignment, false, false, false, MachinePointerInfo(),
          MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
    return false;
  return true;
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &dl, SelectionDAG &DAG) const {

  XCoreFunctionInfo *XFI =
      DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  if (!isVarArg)
    CCInfo.AllocateStack(XFI->getReturnStackOffset(), Align(4));

  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0".
  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));

  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (isVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
    // Create the frame index object for the memory location.
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
1484 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); 1485 MemOpChains.push_back(DAG.getStore( 1486 Chain, dl, OutVals[i], FIN, 1487 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); 1488 } 1489 1490 // Transform all store nodes into one single node because 1491 // all stores are independent of each other. 1492 if (!MemOpChains.empty()) 1493 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 1494 1495 // Now handle return values copied to registers. 1496 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 1497 CCValAssign &VA = RVLocs[i]; 1498 if (!VA.isRegLoc()) 1499 continue; 1500 // Copy the result values into the output registers. 1501 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag); 1502 1503 // guarantee that all emitted copies are 1504 // stuck together, avoiding something bad 1505 Flag = Chain.getValue(1); 1506 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 1507 } 1508 1509 RetOps[0] = Chain; // Update chain. 1510 1511 // Add the flag if we have it. 1512 if (Flag.getNode()) 1513 RetOps.push_back(Flag); 1514 1515 return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps); 1516 } 1517 1518 //===----------------------------------------------------------------------===// 1519 // Other Lowering Code 1520 //===----------------------------------------------------------------------===// 1521 1522 MachineBasicBlock * 1523 XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 1524 MachineBasicBlock *BB) const { 1525 const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); 1526 DebugLoc dl = MI.getDebugLoc(); 1527 assert((MI.getOpcode() == XCore::SELECT_CC) && 1528 "Unexpected instr type to insert"); 1529 1530 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond 1531 // control-flow pattern. The incoming instruction knows the destination vreg 1532 // to set, the condition code register to branch on, the true/false values to 1533 // select between, and a branch opcode to use. 1534 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 1535 MachineFunction::iterator It = ++BB->getIterator(); 1536 1537 // thisMBB: 1538 // ... 1539 // TrueVal = ... 1540 // cmpTY ccX, r1, r2 1541 // bCC copy1MBB 1542 // fallthrough --> copy0MBB 1543 MachineBasicBlock *thisMBB = BB; 1544 MachineFunction *F = BB->getParent(); 1545 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 1546 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 1547 F->insert(It, copy0MBB); 1548 F->insert(It, sinkMBB); 1549 1550 // Transfer the remainder of BB and its successor edges to sinkMBB. 1551 sinkMBB->splice(sinkMBB->begin(), BB, 1552 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 1553 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 1554 1555 // Next, add the true and fallthrough blocks as its successors. 1556 BB->addSuccessor(copy0MBB); 1557 BB->addSuccessor(sinkMBB); 1558 1559 BuildMI(BB, dl, TII.get(XCore::BRFT_lru6)) 1560 .addReg(MI.getOperand(1).getReg()) 1561 .addMBB(sinkMBB); 1562 1563 // copy0MBB: 1564 // %FalseValue = ... 1565 // # fallthrough to sinkMBB 1566 BB = copy0MBB; 1567 1568 // Update machine-CFG edges 1569 BB->addSuccessor(sinkMBB); 1570 1571 // sinkMBB: 1572 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 1573 // ... 
1574 BB = sinkMBB; 1575 BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg()) 1576 .addReg(MI.getOperand(3).getReg()) 1577 .addMBB(copy0MBB) 1578 .addReg(MI.getOperand(2).getReg()) 1579 .addMBB(thisMBB); 1580 1581 MI.eraseFromParent(); // The pseudo instruction is gone now. 1582 return BB; 1583 } 1584 1585 //===----------------------------------------------------------------------===// 1586 // Target Optimization Hooks 1587 //===----------------------------------------------------------------------===// 1588 1589 SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N, 1590 DAGCombinerInfo &DCI) const { 1591 SelectionDAG &DAG = DCI.DAG; 1592 SDLoc dl(N); 1593 switch (N->getOpcode()) { 1594 default: break; 1595 case ISD::INTRINSIC_VOID: 1596 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 1597 case Intrinsic::xcore_outt: 1598 case Intrinsic::xcore_outct: 1599 case Intrinsic::xcore_chkct: { 1600 SDValue OutVal = N->getOperand(3); 1601 // These instructions ignore the high bits. 1602 if (OutVal.hasOneUse()) { 1603 unsigned BitWidth = OutVal.getValueSizeInBits(); 1604 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8); 1605 KnownBits Known; 1606 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 1607 !DCI.isBeforeLegalizeOps()); 1608 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 1609 if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) || 1610 TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO)) 1611 DCI.CommitTargetLoweringOpt(TLO); 1612 } 1613 break; 1614 } 1615 case Intrinsic::xcore_setpt: { 1616 SDValue Time = N->getOperand(3); 1617 // This instruction ignores the high bits. 1618 if (Time.hasOneUse()) { 1619 unsigned BitWidth = Time.getValueSizeInBits(); 1620 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); 1621 KnownBits Known; 1622 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 1623 !DCI.isBeforeLegalizeOps()); 1624 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 1625 if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) || 1626 TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO)) 1627 DCI.CommitTargetLoweringOpt(TLO); 1628 } 1629 break; 1630 } 1631 } 1632 break; 1633 case XCoreISD::LADD: { 1634 SDValue N0 = N->getOperand(0); 1635 SDValue N1 = N->getOperand(1); 1636 SDValue N2 = N->getOperand(2); 1637 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1638 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1639 EVT VT = N0.getValueType(); 1640 1641 // canonicalize constant to RHS 1642 if (N0C && !N1C) 1643 return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2); 1644 1645 // fold (ladd 0, 0, x) -> 0, x & 1 1646 if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) { 1647 SDValue Carry = DAG.getConstant(0, dl, VT); 1648 SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2, 1649 DAG.getConstant(1, dl, VT)); 1650 SDValue Ops[] = { Result, Carry }; 1651 return DAG.getMergeValues(Ops, dl); 1652 } 1653 1654 // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the 1655 // low bit set 1656 if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) { 1657 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 1658 VT.getSizeInBits() - 1); 1659 KnownBits Known = DAG.computeKnownBits(N2); 1660 if ((Known.Zero & Mask) == Mask) { 1661 SDValue Carry = DAG.getConstant(0, dl, VT); 1662 SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2); 1663 SDValue Ops[] = { Result, Carry }; 1664 return DAG.getMergeValues(Ops, dl); 1665 } 1666 
} 1667 } 1668 break; 1669 case XCoreISD::LSUB: { 1670 SDValue N0 = N->getOperand(0); 1671 SDValue N1 = N->getOperand(1); 1672 SDValue N2 = N->getOperand(2); 1673 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1674 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1675 EVT VT = N0.getValueType(); 1676 1677 // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set 1678 if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) { 1679 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 1680 VT.getSizeInBits() - 1); 1681 KnownBits Known = DAG.computeKnownBits(N2); 1682 if ((Known.Zero & Mask) == Mask) { 1683 SDValue Borrow = N2; 1684 SDValue Result = DAG.getNode(ISD::SUB, dl, VT, 1685 DAG.getConstant(0, dl, VT), N2); 1686 SDValue Ops[] = { Result, Borrow }; 1687 return DAG.getMergeValues(Ops, dl); 1688 } 1689 } 1690 1691 // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the 1692 // low bit set 1693 if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) { 1694 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 1695 VT.getSizeInBits() - 1); 1696 KnownBits Known = DAG.computeKnownBits(N2); 1697 if ((Known.Zero & Mask) == Mask) { 1698 SDValue Borrow = DAG.getConstant(0, dl, VT); 1699 SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2); 1700 SDValue Ops[] = { Result, Borrow }; 1701 return DAG.getMergeValues(Ops, dl); 1702 } 1703 } 1704 } 1705 break; 1706 case XCoreISD::LMUL: { 1707 SDValue N0 = N->getOperand(0); 1708 SDValue N1 = N->getOperand(1); 1709 SDValue N2 = N->getOperand(2); 1710 SDValue N3 = N->getOperand(3); 1711 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1712 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1713 EVT VT = N0.getValueType(); 1714 // Canonicalize multiplicative constant to RHS. If both multiplicative 1715 // operands are constant canonicalize smallest to RHS. 1716 if ((N0C && !N1C) || 1717 (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue())) 1718 return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT), 1719 N1, N0, N2, N3); 1720 1721 // lmul(x, 0, a, b) 1722 if (N1C && N1C->isNullValue()) { 1723 // If the high result is unused fold to add(a, b) 1724 if (N->hasNUsesOfValue(0, 0)) { 1725 SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3); 1726 SDValue Ops[] = { Lo, Lo }; 1727 return DAG.getMergeValues(Ops, dl); 1728 } 1729 // Otherwise fold to ladd(a, b, 0) 1730 SDValue Result = 1731 DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1); 1732 SDValue Carry(Result.getNode(), 1); 1733 SDValue Ops[] = { Carry, Result }; 1734 return DAG.getMergeValues(Ops, dl); 1735 } 1736 } 1737 break; 1738 case ISD::ADD: { 1739 // Fold 32 bit expressions such as add(add(mul(x,y),a),b) -> 1740 // lmul(x, y, a, b). The high result of lmul will be ignored. 1741 // This is only profitable if the intermediate results are unused 1742 // elsewhere. 1743 SDValue Mul0, Mul1, Addend0, Addend1; 1744 if (N->getValueType(0) == MVT::i32 && 1745 isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) { 1746 SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl, 1747 DAG.getVTList(MVT::i32, MVT::i32), Mul0, 1748 Mul1, Addend0, Addend1); 1749 SDValue Result(Ignored.getNode(), 1); 1750 return Result; 1751 } 1752 APInt HighMask = APInt::getHighBitsSet(64, 32); 1753 // Fold 64 bit expression such as add(add(mul(x,y),a),b) -> 1754 // lmul(x, y, a, b) if all operands are zero-extended. We do this 1755 // before type legalization as it is messy to match the operands after 1756 // that. 
1757 if (N->getValueType(0) == MVT::i64 && 1758 isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) && 1759 DAG.MaskedValueIsZero(Mul0, HighMask) && 1760 DAG.MaskedValueIsZero(Mul1, HighMask) && 1761 DAG.MaskedValueIsZero(Addend0, HighMask) && 1762 DAG.MaskedValueIsZero(Addend1, HighMask)) { 1763 SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 1764 Mul0, DAG.getConstant(0, dl, MVT::i32)); 1765 SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 1766 Mul1, DAG.getConstant(0, dl, MVT::i32)); 1767 SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 1768 Addend0, DAG.getConstant(0, dl, MVT::i32)); 1769 SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 1770 Addend1, DAG.getConstant(0, dl, MVT::i32)); 1771 SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl, 1772 DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L, 1773 Addend0L, Addend1L); 1774 SDValue Lo(Hi.getNode(), 1); 1775 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 1776 } 1777 } 1778 break; 1779 case ISD::STORE: { 1780 // Replace unaligned store of unaligned load with memmove. 1781 StoreSDNode *ST = cast<StoreSDNode>(N); 1782 if (!DCI.isBeforeLegalize() || 1783 allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), 1784 ST->getMemoryVT(), 1785 *ST->getMemOperand()) || 1786 ST->isVolatile() || ST->isIndexed()) { 1787 break; 1788 } 1789 SDValue Chain = ST->getChain(); 1790 1791 unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits(); 1792 assert((StoreBits % 8) == 0 && 1793 "Store size in bits must be a multiple of 8"); 1794 unsigned Alignment = ST->getAlignment(); 1795 1796 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) { 1797 if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() && 1798 LD->getAlignment() == Alignment && 1799 !LD->isVolatile() && !LD->isIndexed() && 1800 Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) { 1801 bool isTail = isInTailCallPosition(DAG, ST, Chain); 1802 return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(), 1803 DAG.getConstant(StoreBits / 8, dl, MVT::i32), 1804 Align(Alignment), false, isTail, 1805 ST->getPointerInfo(), LD->getPointerInfo()); 1806 } 1807 } 1808 break; 1809 } 1810 } 1811 return SDValue(); 1812 } 1813 1814 void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 1815 KnownBits &Known, 1816 const APInt &DemandedElts, 1817 const SelectionDAG &DAG, 1818 unsigned Depth) const { 1819 Known.resetAll(); 1820 switch (Op.getOpcode()) { 1821 default: break; 1822 case XCoreISD::LADD: 1823 case XCoreISD::LSUB: 1824 if (Op.getResNo() == 1) { 1825 // Top bits of carry / borrow are clear. 1826 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(), 1827 Known.getBitWidth() - 1); 1828 } 1829 break; 1830 case ISD::INTRINSIC_W_CHAIN: 1831 { 1832 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 1833 switch (IntNo) { 1834 case Intrinsic::xcore_getts: 1835 // High bits are known to be zero. 1836 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(), 1837 Known.getBitWidth() - 16); 1838 break; 1839 case Intrinsic::xcore_int: 1840 case Intrinsic::xcore_inct: 1841 // High bits are known to be zero. 1842 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(), 1843 Known.getBitWidth() - 8); 1844 break; 1845 case Intrinsic::xcore_testct: 1846 // Result is either 0 or 1. 
1847 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(), 1848 Known.getBitWidth() - 1); 1849 break; 1850 case Intrinsic::xcore_testwct: 1851 // Result is in the range 0 - 4. 1852 Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(), 1853 Known.getBitWidth() - 3); 1854 break; 1855 } 1856 } 1857 break; 1858 } 1859 } 1860 1861 //===----------------------------------------------------------------------===// 1862 // Addressing mode description hooks 1863 //===----------------------------------------------------------------------===// 1864 1865 static inline bool isImmUs(int64_t val) 1866 { 1867 return (val >= 0 && val <= 11); 1868 } 1869 1870 static inline bool isImmUs2(int64_t val) 1871 { 1872 return (val%2 == 0 && isImmUs(val/2)); 1873 } 1874 1875 static inline bool isImmUs4(int64_t val) 1876 { 1877 return (val%4 == 0 && isImmUs(val/4)); 1878 } 1879 1880 /// isLegalAddressingMode - Return true if the addressing mode represented 1881 /// by AM is legal for this target, for a load/store of the specified type. 1882 bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL, 1883 const AddrMode &AM, Type *Ty, 1884 unsigned AS, 1885 Instruction *I) const { 1886 if (Ty->getTypeID() == Type::VoidTyID) 1887 return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs); 1888 1889 unsigned Size = DL.getTypeAllocSize(Ty); 1890 if (AM.BaseGV) { 1891 return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 && 1892 AM.BaseOffs%4 == 0; 1893 } 1894 1895 switch (Size) { 1896 case 1: 1897 // reg + imm 1898 if (AM.Scale == 0) { 1899 return isImmUs(AM.BaseOffs); 1900 } 1901 // reg + reg 1902 return AM.Scale == 1 && AM.BaseOffs == 0; 1903 case 2: 1904 case 3: 1905 // reg + imm 1906 if (AM.Scale == 0) { 1907 return isImmUs2(AM.BaseOffs); 1908 } 1909 // reg + reg<<1 1910 return AM.Scale == 2 && AM.BaseOffs == 0; 1911 default: 1912 // reg + imm 1913 if (AM.Scale == 0) { 1914 return isImmUs4(AM.BaseOffs); 1915 } 1916 // reg + reg<<2 1917 return AM.Scale == 4 && AM.BaseOffs == 0; 1918 } 1919 } 1920 1921 //===----------------------------------------------------------------------===// 1922 // XCore Inline Assembly Support 1923 //===----------------------------------------------------------------------===// 1924 1925 std::pair<unsigned, const TargetRegisterClass *> 1926 XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 1927 StringRef Constraint, 1928 MVT VT) const { 1929 if (Constraint.size() == 1) { 1930 switch (Constraint[0]) { 1931 default : break; 1932 case 'r': 1933 return std::make_pair(0U, &XCore::GRRegsRegClass); 1934 } 1935 } 1936 // Use the default implementation in TargetLowering to convert the register 1937 // constraint into a member of a register class. 1938 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 1939 } 1940