//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
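  // EmitStartPt marks the boundary between instructions that were already in
  // the block when FastISel took over and the local value instructions it is
  // about to emit; LastLocalValue then tracks the end of that local value
  // area as materializations are appended.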
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

void FastISel::finishBasicBlock() { flushLocalValueMap(); }

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static Register findLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return Register();
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't delete it.
      return Register();
    }
  }
  return RegDef;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

void FastISel::flushLocalValueMap() {
  // If FastISel bails out, it could leave local value instructions behind
  // that aren't used for anything. Detect and erase those.
  if (LastLocalValue != EmitStartPt) {
    // Save the first instruction after local values, for later.
    MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
    ++FirstNonValue;

    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);
    for (MachineInstr &LocalMI :
         llvm::make_early_inc_range(llvm::make_range(RI, RE))) {
      Register DefReg = findLocalRegDef(LocalMI);
      if (!DefReg)
        continue;
      if (FuncInfo.RegsWithFixups.count(DefReg))
        continue;
      bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
      if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
        if (EmitStartPt == &LocalMI)
          EmitStartPt = EmitStartPt->getPrevNode();
        LLVM_DEBUG(dbgs() << "removing dead local value materialization"
                          << LocalMI);
        LocalMI.eraseFromParent();
      }
    }

    if (FirstNonValue != FuncInfo.MBB->end()) {
      // See if there are any local value instructions left. If so, we want to
      // make sure the first one has a debug location; if it doesn't, use the
      // first non-value instruction's debug location.

      // If EmitStartPt is non-null, this block had copies at the top before
      // FastISel started doing anything; it points to the last one, so the
      // first local value instruction is the one after EmitStartPt.
      // If EmitStartPt is null, the first local value instruction is at the
      // top of the block.
      MachineBasicBlock::iterator FirstLocalValue =
          EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
                      : FuncInfo.MBB->begin();
      if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
        FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
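      // For example, 42.0 can be materialized as the integer 42 followed by
      // a sint_to_fp; the conversion below is only used when it is exact.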
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

Register FastISel::getRegForGEPIndex(const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return Register();

  // If the index is smaller or larger than intptr_t, truncate or extend it.
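  // GEP indices are treated as signed, so e.g. on a 64-bit target
  //   %p = getelementptr i32, ptr %base, i32 %idx
  // sign-extends %idx to 64 bits before it is scaled and added to %base.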
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  }
  return IdxN;
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  SavePoint OldInsertPt = FuncInfo.InsertPt;
  recomputeInsertPt();
  return OldInsertPt;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && ISD::isBitwiseLogicOp(ISDOpcode))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
                       VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
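    // This is only valid because the "exact" flag promises the division has
    // no remainder; a plain sdiv rounds toward zero while sra rounds toward
    // negative infinity (e.g. -7 sdiv 8 is 0, but -7 >> 3 is -1).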
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
                                      VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      Register IdxN = getRegForGEPIndex(Idx);
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
Halt "fast" selection and bail. 596 return false; 597 } 598 N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN); 599 if (!N) // Unhandled operand. Halt "fast" selection and bail. 600 return false; 601 } 602 } 603 if (TotalOffs) { 604 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT); 605 if (!N) // Unhandled operand. Halt "fast" selection and bail. 606 return false; 607 } 608 609 // We successfully emitted code for the given LLVM Instruction. 610 updateValueMap(I, N); 611 return true; 612 } 613 614 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops, 615 const CallInst *CI, unsigned StartIdx) { 616 for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) { 617 Value *Val = CI->getArgOperand(i); 618 // Check for constants and encode them with a StackMaps::ConstantOp prefix. 619 if (const auto *C = dyn_cast<ConstantInt>(Val)) { 620 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp)); 621 Ops.push_back(MachineOperand::CreateImm(C->getSExtValue())); 622 } else if (isa<ConstantPointerNull>(Val)) { 623 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp)); 624 Ops.push_back(MachineOperand::CreateImm(0)); 625 } else if (auto *AI = dyn_cast<AllocaInst>(Val)) { 626 // Values coming from a stack location also require a special encoding, 627 // but that is added later on by the target specific frame index 628 // elimination implementation. 629 auto SI = FuncInfo.StaticAllocaMap.find(AI); 630 if (SI != FuncInfo.StaticAllocaMap.end()) 631 Ops.push_back(MachineOperand::CreateFI(SI->second)); 632 else 633 return false; 634 } else { 635 Register Reg = getRegForValue(Val); 636 if (!Reg) 637 return false; 638 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false)); 639 } 640 } 641 return true; 642 } 643 644 bool FastISel::selectStackmap(const CallInst *I) { 645 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>, 646 // [live variables...]) 647 assert(I->getCalledFunction()->getReturnType()->isVoidTy() && 648 "Stackmap cannot return a value."); 649 650 // The stackmap intrinsic only records the live variables (the arguments 651 // passed to it) and emits NOPS (if requested). Unlike the patchpoint 652 // intrinsic, this won't be lowered to a function call. This means we don't 653 // have to worry about calling conventions and target-specific lowering code. 654 // Instead we perform the call lowering right here. 655 // 656 // CALLSEQ_START(0, 0...) 657 // STACKMAP(id, nbytes, ...) 658 // CALLSEQ_END(0, 0) 659 // 660 SmallVector<MachineOperand, 32> Ops; 661 662 // Add the <id> and <numBytes> constants. 663 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) && 664 "Expected a constant integer."); 665 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)); 666 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue())); 667 668 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) && 669 "Expected a constant integer."); 670 const auto *NumBytes = 671 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)); 672 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue())); 673 674 // Push live variables for the stack map (skipping the first two arguments 675 // <id> and <numBytes>). 676 if (!addStackMapLiveVars(Ops, I, 2)) 677 return false; 678 679 // We are not adding any register mask info here, because the stackmap doesn't 680 // clobber anything. 681 682 // Add scratch registers as implicit def and early clobber. 
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee =
      I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
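  // The callee is encoded as an immediate when its address is a known
  // constant (an inttoptr of a constant, or null), and as a global address
  // operand otherwise; no other callee forms are supported here.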
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, MIMD,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  // XRay events are only supported on 64-bit AArch64 and x86-64; on other
  // targets, silently drop the event call.
  if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
    return true; // don't do anything to this instruction.
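  // Forward the event buffer pointer and size arguments as register uses of
  // the PATCHABLE_EVENT_CALL pseudo-instruction.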
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  // As above, typed events are only supported on 64-bit AArch64 and x86-64.
  if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, which gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(),
                            AttributeList::ReturnIndex, Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
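  // A single IR-level return type may expand to several legal registers,
  // e.g. an i64 return value on a 32-bit target is returned in two i32
  // registers, so build one ISD::InputArg per register below.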
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = Arg.IndirectType;
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftAsync)
      Flags.setSwiftAsync();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop. If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    MaybeAlign MemAlign = Arg.Alignment;
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);

      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      if (!MemAlign)
        MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
      Flags.setByValSize(FrameSize);
    } else if (!MemAlign) {
      MemAlign = DL.getABITypeAlign(Arg.Ty);
    }
    Flags.setMemAlign(*MemAlign);
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && !CI->isMustTailCall() &&
      MF->getFunction().getFnAttribute("disable-tail-calls").getValueAsBool())
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  diagnoseDontCall(*CI);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().c_str());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
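  // selectIntrinsicCall handles (or deliberately ignores) most debug and
  // annotation intrinsics without emitting any machine code at all.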
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  return lowerCall(Call);
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its
  // operand.
  case Intrinsic::assume:
  // Neither does the llvm.experimental.noalias.scope.decl intrinsic.
  case Intrinsic::experimental_noalias_scope_decl:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (!hasDebugInfo)\n");
      return true;
    }

    if (FuncInfo.PreprocessedDbgDeclares.contains(DI))
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (bad/undef address)\n");
      return true;
    }

    std::optional<MachineOperand> Op;
    if (Register Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
             "Expected inlined-at fields to agree");
      if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
        // If using instruction referencing, produce this as a DBG_INSTR_REF,
        // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
        // the expression; we don't have an "indirect" flag in DBG_INSTR_REF.
        SmallVector<uint64_t, 3> Ops(
            {dwarf::DW_OP_LLVM_arg, 0, dwarf::DW_OP_deref});
        auto *NewExpr = DIExpression::prependOpcodes(DI->getExpression(), Ops);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(),
                TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, *Op,
                DI->getVariable(), NewExpr);
      } else {
        // A dbg.declare describes the address of a source variable, so lower
        // it into an indirect DBG_VALUE.
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(),
                TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op,
                DI->getVariable(), DI->getExpression());
      }
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (no materialized reg for address)\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    DIExpression *Expr = DI->getExpression();
    DILocalVariable *Var = DI->getVariable();
    assert(Var->isValidLocationForIntrinsic(MIMD.getDL()) &&
           "Expected inlined-at fields to agree");
    if (!V || isa<UndefValue>(V) || DI->hasArgList()) {
      // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
      // undef DBG_VALUE to terminate any prior location.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II, false, 0U,
              Var, Expr);
      return true;
    }
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      // See if there's an expression to constant-fold.
      if (Expr)
        std::tie(Expr, CI) = Expr->constantFold(CI);
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
            .addCImm(CI)
            .addImm(0U)
            .addMetadata(Var)
            .addMetadata(Expr);
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
            .addImm(CI->getZExtValue())
            .addImm(0U)
            .addMetadata(Var)
            .addMetadata(Expr);
      return true;
    }
    if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
          .addFPImm(CF)
          .addImm(0U)
          .addMetadata(Var)
          .addMetadata(Expr);
      return true;
    }
    if (const auto *Arg = dyn_cast<Argument>(V);
        Arg && Expr && Expr->isEntryValue()) {
      // As per the Verifier, this case is only valid for swift async Args.
      assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));

      Register Reg = getRegForValue(Arg);
      for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
        if (Reg == VirtReg || Reg == PhysReg) {
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II,
                  false /*IsIndirect*/, PhysReg, Var, Expr);
          return true;
        }

      LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
                           "couldn't find a physical register\n"
                        << *DI << "\n");
      return true;
    }
    if (auto SI = FuncInfo.StaticAllocaMap.find(dyn_cast<AllocaInst>(V));
        SI != FuncInfo.StaticAllocaMap.end()) {
      MachineOperand FrameIndexOp = MachineOperand::CreateFI(SI->second);
      bool IsIndirect = false;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II, IsIndirect,
              FrameIndexOp, Var, Expr);
      return true;
    }
    if (Register Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      if (!FuncInfo.MF->useDebugInstrRef()) {
        bool IsIndirect = false;
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II, IsIndirect,
                Reg, Var, Expr);
        return true;
      }
      // If using instruction referencing, produce this as a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs.
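      // The referenced value is passed as a machine operand, and the
      // DW_OP_LLVM_arg prefix added below makes the expression consume it.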
      SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
          /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
          /* isKill */ false, /* isDead */ false,
          /* isUndef */ false, /* isEarlyClobber */ false,
          /* SubReg */ 0, /* isDebug */ true)});
      SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
      auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(),
              TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, MOs,
              Var, NewExpr);
      return true;
    }
    // We don't know how to handle other cases, so we drop.
    LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst *DI = cast<DbgLabelInst>(II);
    assert(DI->getLabel() && "Missing label");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect: {
    Register ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  Register InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // If the bitcast doesn't change the type, just use the operand value.
  if (SrcVT == DstVT) {
    updateValueMap(I, Op0);
    return true;
  }

  // Otherwise, select a BITCAST opcode.
  Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectFreeze(const User *I) {
  Register Reg = getRegForValue(I->getOperand(0));
  if (!Reg)
    // Unhandled operand.
    return false;

  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
    // Unhandled type, bail out.
    return false;

  MVT Ty = ETy.getSimpleVT();
  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
  Register ResultReg = createResultReg(TyRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
          TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
{
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

bool FastISel::selectInstruction(const Instruction *I) {
  // Flush the local value map before starting each instruction.
  // This improves locality and debugging, and can reduce spills.
  // Reuse of values across IR instructions is relatively uncommon.
  flushLocalValueMap();

  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (I->isTerminator()) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
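  // Operand bundles such as "deopt" change how the call itself has to be
  // lowered, which FastISel does not know how to do, so any other bundle
  // kind forces a fall back to SelectionDAG.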
  if (auto *Call = dyn_cast<CallBase>(I))
    for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
      if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  MIMD = MIMetadata(*I);

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      MIMD = {};
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    MIMD = {};
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  MIMD = {};
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (I->isTerminator()) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions. If
    // the branch is the only non-debug instruction in the block, we emit it
    // anyway (in the else branch below), for more accurate line information.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
  // happen in degenerate IR, and MachineIR forbids having a block twice in
  // the successor/predecessor lists.
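  // Illustrative example (not from the original source): degenerate IR such
  // as
  //   br i1 %cond, label %bb, label %bb
  // gives TrueMBB == FalseMBB, so the edge must be added only once.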
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, MIMD.getDL());
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I, const Value *In) {
  Register OpReg = getRegForValue(In);
  if (!OpReg)
    return false;

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg);
  if (!IntReg)
    return false;

  Register IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
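  // Illustrative example (not from the original source): for
  //   %x = extractvalue { i32, i64 } %agg, 1
  // the linear index is 1, and the result register is the base register plus
  // the number of registers used to hold the preceding i32 field.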
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::FNeg:
    return selectFNeg(I, I->getOperand(0));

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    // On AIX, normal call lowering currently uses the DAG-ISEL path, so that
    // the callee of a direct function call instruction is mapped to the
    // symbol for the function's entry point, which is distinct from the
    // function descriptor symbol. The latter is the symbol whose XCOFF symbol
    // name is the C-linkage name of the source level function.
    // FastISel can still select intrinsics, however.
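    // Illustrative example (not from the original source): for a C function
    // foo on AIX, the descriptor symbol is "foo" while the entry-point symbol
    // is ".foo"; direct calls must reference the latter, which only the DAG
    // path handles.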
    if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
      return false;
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::Freeze:
    return selectFreeze(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() = default;

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
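  // Illustrative example (not from the original source):
  //   mul x, 8 -> shl x, 3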
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed): check to make sure shift amounts are
  // in range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg)
    return ResultReg;
  Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

Register FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
                                            unsigned OpNum) {
  if (Op.isVirtual()) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      Register NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
  return ResultReg;
}

Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(II.implicit_defs()[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   unsigned Op1) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addReg(Op1);
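    // The instruction has no explicit definition, so its result is produced
    // in an implicit physical register; copy it into the requested virtual
    // register.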
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, unsigned Op1, unsigned Op2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}
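// Illustrative usage (hypothetical opcode, not from the original source): a
// target's fastSelectInstruction might materialize an FP constant with
//   Register R = fastEmitInst_f(TargetOpc, RC, cast<ConstantFP>(C));
// where TargetOpc is a target instruction that encodes the FP immediate
// directly.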
Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, unsigned Op1, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              uint32_t Idx) {
  Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(Register::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, 0, Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
}

/// Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBBs for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const Instruction *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not
    // been emitted yet.
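    // Illustrative example (not from the original source): for a successor
    // phi such as
    //   %p = phi i32 [ 1, %thisblock ], [ %v, %otherblock ]
    // the constant 1 must be materialized into a register in this block and
    // recorded in PHINodesToUpdate.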
    for (const PHINode &PN : SuccBB->phis()) {
      // Ignore dead PHIs.
      if (PN.use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Use the location of the operand if
      // there is one; otherwise use no location, and flushLocalValueMap will
      // fix it.
      MIMD = {};
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        MIMD = MIMetadata(*Inst);

      Register Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      MIMD = {};
    }
  }

  return true;
}

bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to
  // FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  Register LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  // If the register has fixups, there may be additional uses through a
  // different alias of the register.
  if (FuncInfo.RegsWithFixups.contains(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  MaybeAlign Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlign();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlign();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
  bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
  bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo = I->getAAMetadata();

  if (!Alignment) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlign(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags,
                                           Size, *Alignment, AAInfo, Ranges);
}

CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
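  // Illustrative example (not from the original source): with identical
  // operands,
  //   fcmp oeq double %x, %x
  // is true exactly when %x is not NaN, so it folds to FCMP_ORD below.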
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
  }

  return Predicate;
}
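// Illustrative usage note (not from the original source): callers can use the
// folded predicate to emit simpler code; in particular, an FCMP_TRUE or
// FCMP_FALSE result allows materializing a constant instead of a compare.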