//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86CallingConv.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

namespace {

class X86FastISel final : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
  /// floating point ops.
  /// When SSE is available, use it for f32 operations.
  /// When SSE2 is available, use it for f64 operations.
  bool X86ScalarSSEf64;
  bool X86ScalarSSEf32;

public:
  explicit X86FastISel(FunctionLoweringInfo &funcInfo,
                       const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo) {
    Subtarget = &funcInfo.MF->getSubtarget<X86Subtarget>();
    X86ScalarSSEf64 = Subtarget->hasSSE2();
    X86ScalarSSEf32 = Subtarget->hasSSE1();
  }

  bool fastSelectInstruction(const Instruction *I) override;

  /// The specified machine instr operand is a vreg, and that
  /// vreg is being provided by the specified load instruction. If possible,
  /// try to fold the load as an operand to the instruction, returning true
  /// on success.
  bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                           const LoadInst *LI) override;

  bool fastLowerArguments() override;
  bool fastLowerCall(CallLoweringInfo &CLI) override;
  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT,
                          const DebugLoc &DL);

  bool X86FastEmitLoad(MVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
                       unsigned &ResultReg, unsigned Alignment = 1);

  bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM,
                        MachineMemOperand *MMO = nullptr, bool Aligned = false);
  bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
                        X86AddressMode &AM,
                        MachineMemOperand *MMO = nullptr, bool Aligned = false);

  bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                         unsigned &ResultReg);

  bool X86SelectAddress(const Value *V, X86AddressMode &AM);
  bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);

  bool X86SelectLoad(const Instruction *I);

  bool X86SelectStore(const Instruction *I);

  bool X86SelectRet(const Instruction *I);

  bool X86SelectCmp(const Instruction *I);

  bool X86SelectZExt(const Instruction *I);

  bool X86SelectSExt(const Instruction *I);

  bool X86SelectBranch(const Instruction *I);

  bool X86SelectShift(const Instruction *I);

  bool X86SelectDivRem(const Instruction *I);

  bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);

  bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);

  bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);

  bool X86SelectSelect(const Instruction *I);

  bool X86SelectTrunc(const Instruction *I);

  bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,
                               const TargetRegisterClass *RC);

  bool X86SelectFPExt(const Instruction *I);
  bool X86SelectFPTrunc(const Instruction *I);
  bool X86SelectSIToFP(const Instruction *I);
  bool X86SelectUIToFP(const Instruction *I);
  bool X86SelectIntToFP(const Instruction *I, bool IsSigned);

  const X86InstrInfo *getInstrInfo() const {
    return Subtarget->getInstrInfo();
  }
  const X86TargetMachine *getTargetMachine() const {
    return static_cast<const X86TargetMachine *>(&TM);
  }

  bool handleConstantAddresses(const Value *V, X86AddressMode &AM);

  unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
  unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
  unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
  unsigned fastMaterializeConstant(const Constant *C) override;

  unsigned fastMaterializeAlloca(const AllocaInst *C) override;

  unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;

  /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
  /// computed in an SSE register, not on the X87 floating point stack.
  bool isScalarFPTypeInSSEReg(EVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is legal when SSE2
           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is legal when SSE1
  }

  bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);

  bool IsMemcpySmall(uint64_t Len);

  bool TryEmitSmallMemcpy(X86AddressMode DestAM,
                          X86AddressMode SrcAM, uint64_t Len);

  bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
                            const Value *Cond);

  const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB,
                                            X86AddressMode &AM);

  unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC, unsigned Op0,
                             bool Op0IsKill, unsigned Op1, bool Op1IsKill,
                             unsigned Op2, bool Op2IsKill, unsigned Op3,
                             bool Op3IsKill);
};

} // end anonymous namespace.

static std::pair<unsigned, bool>
getX86SSEConditionCode(CmpInst::Predicate Predicate) {
  unsigned CC;
  bool NeedSwap = false;

  // SSE Condition code mapping:
  //  0 - EQ
  //  1 - LT
  //  2 - LE
  //  3 - UNORD
  //  4 - NEQ
  //  5 - NLT
  //  6 - NLE
  //  7 - ORD
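  // Immediates above 7 come from the extended predicate range that only
  // VEX-encoded (AVX) compares accept:
  //  8 - EQ_UQ  (equal, allowing unordered)
  // 12 - NEQ_OQ (not-equal, ordered)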
  switch (Predicate) {
  default: llvm_unreachable("Unexpected predicate");
  case CmpInst::FCMP_OEQ: CC = 0;          break;
  case CmpInst::FCMP_OGT: NeedSwap = true; LLVM_FALLTHROUGH;
  case CmpInst::FCMP_OLT: CC = 1;          break;
  case CmpInst::FCMP_OGE: NeedSwap = true; LLVM_FALLTHROUGH;
  case CmpInst::FCMP_OLE: CC = 2;          break;
  case CmpInst::FCMP_UNO: CC = 3;          break;
  case CmpInst::FCMP_UNE: CC = 4;          break;
  case CmpInst::FCMP_ULE: NeedSwap = true; LLVM_FALLTHROUGH;
  case CmpInst::FCMP_UGE: CC = 5;          break;
  case CmpInst::FCMP_ULT: NeedSwap = true; LLVM_FALLTHROUGH;
  case CmpInst::FCMP_UGT: CC = 6;          break;
  case CmpInst::FCMP_ORD: CC = 7;          break;
  case CmpInst::FCMP_UEQ: CC = 8;          break;
  case CmpInst::FCMP_ONE: CC = 12;         break;
  }

  return std::make_pair(CC, NeedSwap);
}

/// Adds a complex addressing mode to the given machine instr builder.
/// Note, this will constrain the index register. If it's not possible to
/// constrain the given index register, then a new one will be created. The
/// IndexReg field of the addressing mode will be updated to match in this case.
const MachineInstrBuilder &
X86FastISel::addFullAddress(const MachineInstrBuilder &MIB,
                            X86AddressMode &AM) {
  // First constrain the index register. It needs to be a GR64_NOSP.
  AM.IndexReg = constrainOperandRegClass(MIB->getDesc(), AM.IndexReg,
                                         MIB->getNumOperands() +
                                         X86::AddrIndexReg);
  return ::addFullAddress(MIB, AM);
}

/// Check if it is possible to fold the condition from the XALU intrinsic
/// into the user. The condition code will only be updated on success.
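/// For example, in this illustrative IR the branch can reuse the overflow
/// flag (OF) set by the intrinsic's add, instead of testing a SETO result:
///   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
///   %ovf = extractvalue { i32, i1 } %res, 1
///   br i1 %ovf, label %overflow, label %cont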
bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
                                       const Value *Cond) {
  if (!isa<ExtractValueInst>(Cond))
    return false;

  const auto *EV = cast<ExtractValueInst>(Cond);
  if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
    return false;

  const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
  MVT RetVT;
  const Function *Callee = II->getCalledFunction();
  Type *RetTy =
      cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
  if (!isTypeLegal(RetTy, RetVT))
    return false;

  if (RetVT != MVT::i32 && RetVT != MVT::i64)
    return false;

  X86::CondCode TmpCC;
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;
  }

  // Check if both instructions are in the same basic block.
  if (II->getParent() != I->getParent())
    return false;

  // Make sure nothing is in the way.
  BasicBlock::const_iterator Start(I);
  BasicBlock::const_iterator End(II);
  for (auto Itr = std::prev(Start); Itr != End; --Itr) {
    // We only expect extractvalue instructions between the intrinsic and the
    // instruction to be selected.
    if (!isa<ExtractValueInst>(Itr))
      return false;

    // Check that the extractvalue operand comes from the intrinsic.
    const auto *EVI = cast<ExtractValueInst>(Itr);
    if (EVI->getAggregateOperand() != II)
      return false;
  }

  CC = TmpCC;
  return true;
}

bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
  EVT evt = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
  if (evt == MVT::Other || !evt.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  VT = evt.getSimpleVT();
  // For now, require SSE/SSE2 for performing floating-point operations,
  // since x87 requires additional work.
  if (VT == MVT::f64 && !X86ScalarSSEf64)
    return false;
  if (VT == MVT::f32 && !X86ScalarSSEf32)
    return false;
  // Similarly, no f80 support yet.
  if (VT == MVT::f80)
    return false;
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
}

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address to load from is described by AM, which may hold a pre-computed
/// base register or a GlobalAddress. Return true and set the result register
/// by reference if it is possible.
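/// Alignment is the known alignment of the source in bytes; below it selects
/// between aligned, unaligned, and non-temporal load opcodes.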
bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM,
                                  MachineMemOperand *MMO, unsigned &ResultReg,
                                  unsigned Alignment) {
  bool HasSSE41 = Subtarget->hasSSE41();
  bool HasAVX = Subtarget->hasAVX();
  bool HasAVX2 = Subtarget->hasAVX2();
  bool HasAVX512 = Subtarget->hasAVX512();
  bool HasVLX = Subtarget->hasVLX();
  bool IsNonTemporal = MMO && MMO->isNonTemporal();

  // Treat i1 loads the same as i8 loads. Masking will be done when storing.
  if (VT == MVT::i1)
    VT = MVT::i8;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  switch (VT.SimpleTy) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    break;
  case MVT::f32:
    if (X86ScalarSSEf32)
      Opc = HasAVX512 ? X86::VMOVSSZrm_alt :
            HasAVX    ? X86::VMOVSSrm_alt :
                        X86::MOVSSrm_alt;
    else
      Opc = X86::LD_Fp32m;
    break;
  case MVT::f64:
    if (X86ScalarSSEf64)
      Opc = HasAVX512 ? X86::VMOVSDZrm_alt :
            HasAVX    ? X86::VMOVSDrm_alt :
                        X86::MOVSDrm_alt;
    else
      Opc = X86::LD_Fp64m;
    break;
  case MVT::f80:
    // No f80 support yet.
    return false;
  case MVT::v4f32:
    if (IsNonTemporal && Alignment >= 16 && HasSSE41)
      Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
            HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
    else if (Alignment >= 16)
      Opc = HasVLX ? X86::VMOVAPSZ128rm :
            HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
    else
      Opc = HasVLX ? X86::VMOVUPSZ128rm :
            HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;
    break;
  case MVT::v2f64:
    if (IsNonTemporal && Alignment >= 16 && HasSSE41)
      Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
            HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
    else if (Alignment >= 16)
      Opc = HasVLX ? X86::VMOVAPDZ128rm :
            HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
    else
      Opc = HasVLX ? X86::VMOVUPDZ128rm :
            HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;
    break;
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v8i16:
  case MVT::v16i8:
    if (IsNonTemporal && Alignment >= 16 && HasSSE41)
      Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
            HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
    else if (Alignment >= 16)
      Opc = HasVLX ? X86::VMOVDQA64Z128rm :
            HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
    else
      Opc = HasVLX ? X86::VMOVDQU64Z128rm :
            HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;
    break;
  case MVT::v8f32:
    assert(HasAVX);
    if (IsNonTemporal && Alignment >= 32 && HasAVX2)
      Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
    else if (IsNonTemporal && Alignment >= 16)
      return false; // Force split for X86::VMOVNTDQArm
    else if (Alignment >= 32)
      Opc = HasVLX ? X86::VMOVAPSZ256rm : X86::VMOVAPSYrm;
    else
      Opc = HasVLX ? X86::VMOVUPSZ256rm : X86::VMOVUPSYrm;
    break;
  case MVT::v4f64:
    assert(HasAVX);
    if (IsNonTemporal && Alignment >= 32 && HasAVX2)
      Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
    else if (IsNonTemporal && Alignment >= 16)
      return false; // Force split for X86::VMOVNTDQArm
    else if (Alignment >= 32)
      Opc = HasVLX ? X86::VMOVAPDZ256rm : X86::VMOVAPDYrm;
    else
      Opc = HasVLX ? X86::VMOVUPDZ256rm : X86::VMOVUPDYrm;
    break;
  case MVT::v8i32:
  case MVT::v4i64:
  case MVT::v16i16:
  case MVT::v32i8:
    assert(HasAVX);
    if (IsNonTemporal && Alignment >= 32 && HasAVX2)
      Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
    else if (IsNonTemporal && Alignment >= 16)
      return false; // Force split for X86::VMOVNTDQArm
    else if (Alignment >= 32)
      Opc = HasVLX ? X86::VMOVDQA64Z256rm : X86::VMOVDQAYrm;
    else
      Opc = HasVLX ? X86::VMOVDQU64Z256rm : X86::VMOVDQUYrm;
    break;
  case MVT::v16f32:
    assert(HasAVX512);
    if (IsNonTemporal && Alignment >= 64)
      Opc = X86::VMOVNTDQAZrm;
    else
      Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;
    break;
  case MVT::v8f64:
    assert(HasAVX512);
    if (IsNonTemporal && Alignment >= 64)
      Opc = X86::VMOVNTDQAZrm;
    else
      Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;
    break;
  case MVT::v8i64:
  case MVT::v16i32:
  case MVT::v32i16:
  case MVT::v64i8:
    assert(HasAVX512);
    // Note: There are a lot more choices based on type with AVX-512, but
    // there's really no advantage when the load isn't masked.
    if (IsNonTemporal && Alignment >= 64)
      Opc = X86::VMOVNTDQAZrm;
    else
      Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;
    break;
  }

  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
  addFullAddress(MIB, AM);
  if (MMO)
    MIB->addMemOperand(*FuncInfo.MF, MMO);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Val of
/// type VT. The address, described by AM, is either pre-computed, consisting
/// of a base pointer and a displacement offset, or a GlobalAddress.
/// Return true if it is possible.
bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
                                   X86AddressMode &AM,
                                   MachineMemOperand *MMO, bool Aligned) {
  bool HasSSE1 = Subtarget->hasSSE1();
  bool HasSSE2 = Subtarget->hasSSE2();
  bool HasSSE4A = Subtarget->hasSSE4A();
  bool HasAVX = Subtarget->hasAVX();
  bool HasAVX512 = Subtarget->hasAVX512();
  bool HasVLX = Subtarget->hasVLX();
  bool IsNonTemporal = MMO && MMO->isNonTemporal();

  // Get opcode and regclass of the output for the given store instruction.
  unsigned Opc = 0;
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f80: // No f80 support yet.
  default: return false;
  case MVT::i1: {
    // Mask out all but lowest bit.
    Register AndResult = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(X86::AND8ri), AndResult)
        .addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
    ValReg = AndResult;
    LLVM_FALLTHROUGH; // handle i1 as i8.
  }
  case MVT::i8:  Opc = X86::MOV8mr;  break;
  case MVT::i16: Opc = X86::MOV16mr; break;
  case MVT::i32:
    Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;
    break;
  case MVT::f32:
    if (X86ScalarSSEf32) {
      if (IsNonTemporal && HasSSE4A)
        Opc = X86::MOVNTSS;
      else
        Opc = HasAVX512 ? X86::VMOVSSZmr :
              HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
    } else
      Opc = X86::ST_Fp32m;
    break;
  case MVT::f64:
    if (X86ScalarSSEf64) {
      if (IsNonTemporal && HasSSE4A)
        Opc = X86::MOVNTSD;
      else
        Opc = HasAVX512 ? X86::VMOVSDZmr :
              HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;
    } else
      Opc = X86::ST_Fp64m;
    break;
  case MVT::x86mmx:
    Opc = (IsNonTemporal && HasSSE1) ? X86::MMX_MOVNTQmr : X86::MMX_MOVQ64mr;
    break;
  case MVT::v4f32:
    if (Aligned) {
      if (IsNonTemporal)
        Opc = HasVLX ? X86::VMOVNTPSZ128mr :
              HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr;
      else
        Opc = HasVLX ? X86::VMOVAPSZ128mr :
              HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr;
    } else
      Opc = HasVLX ? X86::VMOVUPSZ128mr :
            HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr;
    break;
  case MVT::v2f64:
    if (Aligned) {
      if (IsNonTemporal)
        Opc = HasVLX ? X86::VMOVNTPDZ128mr :
              HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr;
      else
        Opc = HasVLX ? X86::VMOVAPDZ128mr :
              HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr;
    } else
      Opc = HasVLX ? X86::VMOVUPDZ128mr :
            HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr;
    break;
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v8i16:
  case MVT::v16i8:
    if (Aligned) {
      if (IsNonTemporal)
        Opc = HasVLX ? X86::VMOVNTDQZ128mr :
              HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr;
      else
        Opc = HasVLX ? X86::VMOVDQA64Z128mr :
              HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
    } else
      Opc = HasVLX ? X86::VMOVDQU64Z128mr :
            HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;
    break;
  case MVT::v8f32:
    assert(HasAVX);
    if (Aligned) {
      if (IsNonTemporal)
        Opc = HasVLX ? X86::VMOVNTPSZ256mr : X86::VMOVNTPSYmr;
      else
        Opc = HasVLX ? X86::VMOVAPSZ256mr : X86::VMOVAPSYmr;
    } else
      Opc = HasVLX ? X86::VMOVUPSZ256mr : X86::VMOVUPSYmr;
    break;
  case MVT::v4f64:
    assert(HasAVX);
    if (Aligned) {
      if (IsNonTemporal)
        Opc = HasVLX ? X86::VMOVNTPDZ256mr : X86::VMOVNTPDYmr;
      else
        Opc = HasVLX ? X86::VMOVAPDZ256mr : X86::VMOVAPDYmr;
    } else
      Opc = HasVLX ? X86::VMOVUPDZ256mr : X86::VMOVUPDYmr;
    break;
  case MVT::v8i32:
  case MVT::v4i64:
  case MVT::v16i16:
  case MVT::v32i8:
    assert(HasAVX);
    if (Aligned) {
      if (IsNonTemporal)
        Opc = HasVLX ? X86::VMOVNTDQZ256mr : X86::VMOVNTDQYmr;
      else
        Opc = HasVLX ? X86::VMOVDQA64Z256mr : X86::VMOVDQAYmr;
    } else
      Opc = HasVLX ? X86::VMOVDQU64Z256mr : X86::VMOVDQUYmr;
    break;
  case MVT::v16f32:
    assert(HasAVX512);
    if (Aligned)
      Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
    else
      Opc = X86::VMOVUPSZmr;
    break;
  case MVT::v8f64:
    assert(HasAVX512);
    if (Aligned) {
      Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
    } else
      Opc = X86::VMOVUPDZmr;
    break;
  case MVT::v8i64:
  case MVT::v16i32:
  case MVT::v32i16:
  case MVT::v64i8:
    assert(HasAVX512);
    // Note: There are a lot more choices based on type with AVX-512, but
    // there's really no advantage when the store isn't masked.
    if (Aligned)
      Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
    else
      Opc = X86::VMOVDQU64Zmr;
    break;
  }

  const MCInstrDesc &Desc = TII.get(Opc);
  // Some of the instructions in the previous switch use FR128 instead
  // of FR32 for ValReg. Make sure the register we feed the instruction
  // matches its register class constraints.
  // Note: It is fine to do a copy from FR32 to FR128; they are the
  // same registers behind the scenes, which is why this did not trigger
  // any bugs before.
  ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1);
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc);
  addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill));
  if (MMO)
    MIB->addMemOperand(*FuncInfo.MF, MMO);

  return true;
}

bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
                                   X86AddressMode &AM,
                                   MachineMemOperand *MMO, bool Aligned) {
  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Val))
    Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));

  // If this is a store of a simple constant, fold the constant into the store.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    unsigned Opc = 0;
    bool Signed = true;
    switch (VT.getSimpleVT().SimpleTy) {
    default: break;
    case MVT::i1:
      Signed = false;
      LLVM_FALLTHROUGH; // Handle as i8.
    case MVT::i8:  Opc = X86::MOV8mi;  break;
    case MVT::i16: Opc = X86::MOV16mi; break;
    case MVT::i32: Opc = X86::MOV32mi; break;
    case MVT::i64:
      // Must be a 32-bit sign extended value.
      if (isInt<32>(CI->getSExtValue()))
        Opc = X86::MOV64mi32;
      break;
    }

    if (Opc) {
      MachineInstrBuilder MIB =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
      addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()
                                            : CI->getZExtValue());
      if (MMO)
        MIB->addMemOperand(*FuncInfo.MF, MMO);
      return true;
    }
  }

  Register ValReg = getRegForValue(Val);
  if (ValReg == 0)
    return false;

  bool ValKill = hasTrivialKill(Val);
  return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned);
}

/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
/// ISD::SIGN_EXTEND).
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
                                    unsigned Src, EVT SrcVT,
                                    unsigned &ResultReg) {
  unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);
  if (RR == 0)
    return false;

  ResultReg = RR;
  return true;
}

bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
  // Handle constant address.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;

    // Can't handle TLS yet.
    if (GV->isThreadLocal())
      return false;

    // Can't handle !absolute_symbol references yet.
    if (GV->isAbsoluteSymbolRef())
      return false;

    // RIP-relative addresses can't have additional register operands, so if
    // we've already folded stuff into the addressing mode, just force the
    // global value into its own register, which we can use as the basereg.
    if (!Subtarget->isPICStyleRIPRel() ||
        (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
      // Okay, we've committed to selecting this global. Set up the address.
      AM.GV = GV;

      // Allow the subtarget to classify the global.
      unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);

      // If this reference is relative to the pic base, set it now.
      if (isGlobalRelativeToPICBase(GVFlags)) {
        // FIXME: How do we know Base.Reg is free??
        AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
      }

      // Unless the ABI requires an extra load, return a direct reference to
      // the global.
      if (!isGlobalStubReference(GVFlags)) {
        if (Subtarget->isPICStyleRIPRel()) {
          // Use rip-relative addressing if we can. Above we verified that the
          // base and index registers are unused.
          assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
          AM.Base.Reg = X86::RIP;
        }
        AM.GVOpFlags = GVFlags;
        return true;
      }

      // Ok, we need to do a load from a stub. If we've already loaded from
      // this stub, reuse the loaded pointer, otherwise emit the load now.
      DenseMap<const Value *, Register>::iterator I = LocalValueMap.find(V);
      Register LoadReg;
      if (I != LocalValueMap.end() && I->second) {
        LoadReg = I->second;
      } else {
        // Issue load from stub.
        unsigned Opc = 0;
        const TargetRegisterClass *RC = nullptr;
        X86AddressMode StubAM;
        StubAM.Base.Reg = AM.Base.Reg;
        StubAM.GV = GV;
        StubAM.GVOpFlags = GVFlags;

        // Prepare for inserting code in the local-value area.
        SavePoint SaveInsertPt = enterLocalValueArea();

        if (TLI.getPointerTy(DL) == MVT::i64) {
          Opc = X86::MOV64rm;
          RC = &X86::GR64RegClass;

          if (Subtarget->isPICStyleRIPRel())
            StubAM.Base.Reg = X86::RIP;
        } else {
          Opc = X86::MOV32rm;
          RC = &X86::GR32RegClass;
        }

        LoadReg = createResultReg(RC);
        MachineInstrBuilder LoadMI =
            BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), LoadReg);
        addFullAddress(LoadMI, StubAM);

        // Ok, back to normal mode.
        leaveLocalValueArea(SaveInsertPt);

        // Prevent loading GV stub multiple times in same MBB.
        LocalValueMap[V] = LoadReg;
      }

      // Now construct the final address. Note that the Disp, Scale,
      // and Index values may already be set here.
      AM.Base.Reg = LoadReg;
      AM.GV = nullptr;
      return true;
    }
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}

/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
  SmallVector<const Value *, 32> GEPs;
redo_gep:
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts.
    return X86SelectAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return X86SelectAddress(U->getOperand(0), AM);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return X86SelectAddress(U->getOperand(0), AM);
    break;

  case Instruction::Alloca: {
    // Do static allocas.
    const AllocaInst *A = cast<AllocaInst>(V);
    DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(A);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = SI->second;
      return true;
    }
    break;
  }

  case Instruction::Add: {
    // Adds of constants are common and easy enough.
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
      // They have to fit in the 32-bit signed displacement field though.
      if (isInt<32>(Disp)) {
        AM.Disp = (uint32_t)Disp;
        return X86SelectAddress(U->getOperand(0), AM);
      }
    }
    break;
  }

  case Instruction::GetElementPtr: {
    X86AddressMode SavedAM = AM;

    // Pattern-match simple GEPs.
    uint64_t Disp = (int32_t)AM.Disp;
    unsigned IndexReg = AM.IndexReg;
    unsigned Scale = AM.Scale;
    gep_type_iterator GTI = gep_type_begin(U);
    // Iterate through the indices, folding what we can. Constants can be
    // folded, and one dynamic index can be handled, if the scale is supported.
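    // For example (illustrative), "getelementptr i32, i32* %p, i64 %i" with
    // an already-folded displacement of 8 becomes the x86 addressing mode
    // [%p + %i*4 + 8].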
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
        continue;
      }

      // An array/variable index is always of the form i*S where S is the
      // constant scale size. See if we can push the scale into immediates.
      uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
      for (;;) {
        if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
          // Constant-offset addressing.
          Disp += CI->getSExtValue() * S;
          break;
        }
        if (canFoldAddIntoGEP(U, Op)) {
          // A compatible add with a constant operand. Fold the constant.
          ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
          Disp += CI->getSExtValue() * S;
          // Iterate on the other operand.
          Op = cast<AddOperator>(Op)->getOperand(0);
          continue;
        }
        if (IndexReg == 0 &&
            (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
            (S == 1 || S == 2 || S == 4 || S == 8)) {
          // Scaled-index addressing.
          Scale = S;
          IndexReg = getRegForGEPIndex(Op).first;
          if (IndexReg == 0)
            return false;
          break;
        }
        // Unsupported.
        goto unsupported_gep;
      }
    }

    // Check for displacement overflow.
    if (!isInt<32>(Disp))
      break;

    AM.IndexReg = IndexReg;
    AM.Scale = Scale;
    AM.Disp = (uint32_t)Disp;
    GEPs.push_back(V);

    if (const GetElementPtrInst *GEP =
            dyn_cast<GetElementPtrInst>(U->getOperand(0))) {
      // Ok, the GEP indices were covered by constant-offset and scaled-index
      // addressing. Update the address state and move on to examining the base.
      V = GEP;
      goto redo_gep;
    } else if (X86SelectAddress(U->getOperand(0), AM)) {
      return true;
    }

    // If we couldn't merge the gep value into this addr mode, revert back to
    // our address and just match the value instead of completely failing.
    AM = SavedAM;

    for (const Value *I : reverse(GEPs))
      if (handleConstantAddresses(I, AM))
        return true;

    return false;
  unsupported_gep:
    // Ok, the GEP indices weren't all covered.
    break;
  }
  }

  return handleConstantAddresses(V, AM);
}

/// X86SelectCallAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  const Instruction *I = dyn_cast<Instruction>(V);
  // Record if the value is defined in the same basic block.
  //
  // This information is crucial to know whether or not folding an
  // operand is valid.
  // Indeed, FastISel generates or reuses a virtual register for all
  // operands of all instructions it selects. Obviously, the definition and
  // its uses must use the same virtual register; otherwise the produced
  // code is incorrect.
  // Before instruction selection, FunctionLoweringInfo::set sets the virtual
  // registers for values that are alive across basic blocks. This ensures
  // that the values are consistently set across basic blocks, even
  // if different instruction selection mechanisms are used (e.g., a mix of
  // SDISel and FastISel).
  // For values local to a basic block, the instruction selection process
  // generates these virtual registers with whatever method is appropriate
  // for its needs. In particular, FastISel and SDISel do not share the way
  // local virtual registers are set.
  // Therefore, it is impossible (or at least unsafe) to share values
  // between basic blocks unless they use the same instruction selection
  // method, which is not guaranteed on X86.
  // Moreover, things like hasOneUse cannot be used accurately if we
  // allow references to values across basic blocks when they are not
  // alive across basic blocks initially.
  bool InMBB = true;
  if (I) {
    Opcode = I->getOpcode();
    U = I;
    InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts if its operand is in the same BB.
    if (InMBB)
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs if its operand is in the same BB.
    if (InMBB &&
        TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints if its operand is in the same BB.
    if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;
  }

  // Handle constant address.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;

    // RIP-relative addresses can't have additional register operands.
    if (Subtarget->isPICStyleRIPRel() &&
        (AM.Base.Reg != 0 || AM.IndexReg != 0))
      return false;

    // Can't handle TLS.
    if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
      if (GVar->isThreadLocal())
        return false;

    // Okay, we've committed to selecting this global. Set up the basic address.
    AM.GV = GV;

    // Return a direct reference to the global. Fastisel can handle calls to
    // functions that require loads, such as dllimport and nonlazybind
    // functions.
    if (Subtarget->isPICStyleRIPRel()) {
      // Use rip-relative addressing if we can. Above we verified that the
      // base and index registers are unused.
      assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
      AM.Base.Reg = X86::RIP;
    } else {
      AM.GVOpFlags = Subtarget->classifyLocalReference(nullptr);
    }

    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}


/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(const Instruction *I) {
  // Atomic stores need special handling.
  const StoreInst *S = cast<StoreInst>(I);

  if (S->isAtomic())
    return false;

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  const Value *Val = S->getValueOperand();
  const Value *Ptr = S->getPointerOperand();

  MVT VT;
  if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
    return false;

  Align Alignment = S->getAlign();
  Align ABIAlignment = DL.getABITypeAlign(Val->getType());
  bool Aligned = Alignment >= ABIAlignment;

  X86AddressMode AM;
  if (!X86SelectAddress(Ptr, AM))
    return false;

  return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);
}

/// X86SelectRet - Select and emit code to implement ret instructions.
bool X86FastISel::X86SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();
  const X86MachineFunctionInfo *X86MFInfo =
      FuncInfo.MF->getInfo<X86MachineFunctionInfo>();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return false;

  if (TLI.supportSplitCSR(FuncInfo.MF))
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::C &&
      CC != CallingConv::Fast &&
      CC != CallingConv::Tail &&
      CC != CallingConv::X86_FastCall &&
      CC != CallingConv::X86_StdCall &&
      CC != CallingConv::X86_ThisCall &&
      CC != CallingConv::X86_64_SysV &&
      CC != CallingConv::Win64)
    return false;

  // Don't handle popping bytes if they don't fit the ret's immediate.
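  // (The x86 RET-with-pop form encodes the byte count as a 16-bit immediate,
  // hence the isUInt<16> check below.)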
  if (!isUInt<16>(X86MFInfo->getBytesToPopOnReturn()))
    return false;

  // fastcc with -tailcallopt is intended to provide a guaranteed
  // tail call optimization. Fastisel doesn't know how to do that.
  if ((CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) ||
      CC == CallingConv::Tail)
    return false;

  // Let SDISel handle vararg functions.
  if (F.isVarArg())
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, RetCC_X86);

    const Value *RV = Ret->getOperand(0);
    Register Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    // The calling-convention tables for x87 returns don't tell
    // the whole story.
    if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT SrcVT = TLI.getValueType(DL, RV->getType());
    EVT DstVT = VA.getValVT();
    // Special handling for extended integers.
    if (SrcVT != DstVT) {
      if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      assert(DstVT == MVT::i32 && "X86 should always ext to i32");

      if (SrcVT == MVT::i1) {
        if (Outs[0].Flags.isSExt())
          return false;
        // TODO
        SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*Op0IsKill=*/false);
        SrcVT = MVT::i8;
      }
      unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
                                             ISD::SIGN_EXTEND;
      // TODO
      SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg,
                          /*Op0IsKill=*/false);
    }

    // Make the copy.
    Register DstReg = VA.getLocReg();
    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  // Swift calling convention does not require we copy the sret argument
  // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.

  // All x86 ABIs require that for returning structs by value we copy
  // the sret argument into %rax/%eax (depending on ABI) for the return.
  // We saved the argument into a virtual register in the entry block,
  // so now we copy the value out and into %rax/%eax.
  if (F.hasStructRetAttr() && CC != CallingConv::Swift) {
    Register Reg = X86MFInfo->getSRetReturnReg();
    assert(Reg &&
           "SRetReturnReg should have been set in LowerFormalArguments()!");
    unsigned RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
    RetRegs.push_back(RetReg);
  }

  // Now emit the RET.
  MachineInstrBuilder MIB;
  if (X86MFInfo->getBytesToPopOnReturn()) {
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(Subtarget->is64Bit() ? X86::RETIQ : X86::RETIL))
              .addImm(X86MFInfo->getBytesToPopOnReturn());
  } else {
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
  }
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(const Instruction *I) {
  const LoadInst *LI = cast<LoadInst>(I);

  // Atomic loads need special handling.
  if (LI->isAtomic())
    return false;

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  MVT VT;
  if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
    return false;

  const Value *Ptr = LI->getPointerOperand();

  X86AddressMode AM;
  if (!X86SelectAddress(Ptr, AM))
    return false;

  unsigned ResultReg = 0;
  if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
                       LI->getAlign().value()))
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
  bool HasAVX512 = Subtarget->hasAVX512();
  bool HasAVX = Subtarget->hasAVX();
  bool X86ScalarSSEf32 = Subtarget->hasSSE1();
  bool X86ScalarSSEf64 = Subtarget->hasSSE2();

  switch (VT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:  return X86::CMP8rr;
  case MVT::i16: return X86::CMP16rr;
  case MVT::i32: return X86::CMP32rr;
  case MVT::i64: return X86::CMP64rr;
  case MVT::f32:
    return X86ScalarSSEf32
               ? (HasAVX512 ? X86::VUCOMISSZrr
                            : HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr)
               : 0;
  case MVT::f64:
    return X86ScalarSSEf64
               ? (HasAVX512 ? X86::VUCOMISDZrr
                            : HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr)
               : 0;
  }
}

/// If we have a comparison whose RHS is the constant RHSC, return an opcode
/// that can fold the immediate into the compare (e.g. CMP32ri); otherwise
/// return 0.
static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
  int64_t Val = RHSC->getSExtValue();
  switch (VT.getSimpleVT().SimpleTy) {
  // Otherwise, we can't fold the immediate into this comparison.
  default:
    return 0;
  case MVT::i8:
    return X86::CMP8ri;
  case MVT::i16:
    if (isInt<8>(Val))
      return X86::CMP16ri8;
    return X86::CMP16ri;
  case MVT::i32:
    if (isInt<8>(Val))
      return X86::CMP32ri8;
    return X86::CMP32ri;
  case MVT::i64:
    if (isInt<8>(Val))
      return X86::CMP64ri8;
    // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
    // field.
    if (isInt<32>(Val))
      return X86::CMP64ri32;
    return 0;
  }
}

bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
                                     const DebugLoc &CurDbgLoc) {
  Register Op0Reg = getRegForValue(Op0);
  if (Op0Reg == 0) return false;

  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Op1))
    Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));

  // We have two options: compare with register or immediate. If the RHS of
  // the compare is an immediate that we can fold into this compare, use
  // CMPri, otherwise use CMPrr.
  if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareImmOpc))
          .addReg(Op0Reg)
          .addImm(Op1C->getSExtValue());
      return true;
    }
  }

  unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
  if (CompareOpc == 0) return false;

  Register Op1Reg = getRegForValue(Op1);
  if (Op1Reg == 0) return false;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc))
      .addReg(Op0Reg)
      .addReg(Op1Reg);

  return true;
}

bool X86FastISel::X86SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
  unsigned ResultReg = 0;
  switch (Predicate) {
  default: break;
  case CmpInst::FCMP_FALSE: {
    ResultReg = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
            ResultReg);
    ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg,
                                           /*Op0IsKill=*/true, X86::sub_8bit);
    if (!ResultReg)
      return false;
    break;
  }
  case CmpInst::FCMP_TRUE: {
    ResultReg = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
            ResultReg).addImm(1);
    break;
  }
  }

  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  const Value *LHS = CI->getOperand(0);
  const Value *RHS = CI->getOperand(1);

  // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
  // We don't have to materialize a zero constant for this case and can just use
  // %x again on the RHS.
  if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
    const auto *RHSC = dyn_cast<ConstantFP>(RHS);
    if (RHSC && RHSC->isNullValue())
      RHS = LHS;
  }

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
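  // After a ucomis[sd], an ordered-equal result is ZF=1 with PF=0, while an
  // unordered result sets ZF=PF=CF=1. So OEQ needs SETE and SETNP combined
  // with AND, and UNE needs SETNE and SETP combined with OR; the third entry
  // in each table row below is that combining opcode.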
  static const uint16_t SETFOpcTable[2][3] = {
    { X86::COND_E,  X86::COND_NP, X86::AND8rr },
    { X86::COND_NE, X86::COND_P,  X86::OR8rr  }
  };
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default: break;
  case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break;
  case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break;
  }

  ResultReg = createResultReg(&X86::GR8RegClass);
  if (SETFOpc) {
    if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
      return false;

    Register FlagReg1 = createResultReg(&X86::GR8RegClass);
    Register FlagReg2 = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
            FlagReg1).addImm(SETFOpc[0]);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
            FlagReg2).addImm(SETFOpc[1]);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]),
            ResultReg).addReg(FlagReg1).addReg(FlagReg2);
    updateValueMap(I, ResultReg);
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LHS, RHS);

  // Emit a compare of LHS/RHS.
  if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
    return false;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
          ResultReg).addImm(CC);
  updateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectZExt(const Instruction *I) {
  EVT DstVT = TLI.getValueType(DL, I->getType());
  if (!TLI.isTypeLegal(DstVT))
    return false;

  Register ResultReg = getRegForValue(I->getOperand(0));
  if (ResultReg == 0)
    return false;

  // Handle zero-extension from i1 to i8, which is common.
  MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
  if (SrcVT == MVT::i1) {
    // Set the high bits to zero.
    ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
    SrcVT = MVT::i8;

    if (ResultReg == 0)
      return false;
  }

  if (DstVT == MVT::i64) {
    // Handle extension to 64-bits via sub-register shenanigans.
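    // A write to a 32-bit register implicitly zeroes bits 63:32, so a 32-bit
    // MOVZX/MOV plus SUBREG_TO_REG is enough; no 64-bit instruction is needed.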
    unsigned MovInst;

    switch (SrcVT.SimpleTy) {
    case MVT::i8:  MovInst = X86::MOVZX32rr8;  break;
    case MVT::i16: MovInst = X86::MOVZX32rr16; break;
    case MVT::i32: MovInst = X86::MOV32rr;     break;
    default: llvm_unreachable("Unexpected zext to i64 source type");
    }

    Register Result32 = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32)
        .addReg(ResultReg);

    ResultReg = createResultReg(&X86::GR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG),
            ResultReg)
        .addImm(0).addReg(Result32).addImm(X86::sub_32bit);
  } else if (DstVT == MVT::i16) {
    // i8->i16 doesn't exist in the autogenerated isel table. Need to zero
    // extend to 32-bits and then extract down to 16-bits.
    Register Result32 = createResultReg(&X86::GR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8),
            Result32).addReg(ResultReg);

    ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32,
                                           /*Op0IsKill=*/true, X86::sub_16bit);
  } else if (DstVT != MVT::i8) {
    ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
                           ResultReg, /*Op0IsKill=*/true);
    if (ResultReg == 0)
      return false;
  }

  updateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectSExt(const Instruction *I) {
  EVT DstVT = TLI.getValueType(DL, I->getType());
  if (!TLI.isTypeLegal(DstVT))
    return false;

  Register ResultReg = getRegForValue(I->getOperand(0));
  if (ResultReg == 0)
    return false;

  // Handle sign-extension from i1 to i8.
  MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
  if (SrcVT == MVT::i1) {
    // Set the high bits to zero.
    Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg,
                                          /*TODO: Kill=*/false);
    if (ZExtReg == 0)
      return false;

    // Negate the result to make an 8-bit sign extended value.
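    // The zero-extended i1 is 0 or 1, so NEG yields 0 or -1 (0xFF), i.e. the
    // i1 value replicated into every bit of the byte.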
1592 ResultReg = createResultReg(&X86::GR8RegClass);
1593 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::NEG8r),
1594 ResultReg).addReg(ZExtReg);
1595
1596 SrcVT = MVT::i8;
1597 }
1598
1599 if (DstVT == MVT::i16) {
1600 // i8->i16 doesn't exist in the autogenerated isel table. Need to sign
1601 // extend to 32-bits and then extract down to 16-bits.
1602 Register Result32 = createResultReg(&X86::GR32RegClass);
1603 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8),
1604 Result32).addReg(ResultReg);
1605
1606 ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32,
1607 /*Op0IsKill=*/true, X86::sub_16bit);
1608 } else if (DstVT != MVT::i8) {
1609 ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND,
1610 ResultReg, /*Op0IsKill=*/true);
1611 if (ResultReg == 0)
1612 return false;
1613 }
1614
1615 updateValueMap(I, ResultReg);
1616 return true;
1617 }
1618
X86SelectBranch(const Instruction * I)1619 bool X86FastISel::X86SelectBranch(const Instruction *I) {
1620 // Unconditional branches are selected by tablegen-generated code.
1621 // Handle a conditional branch.
1622 const BranchInst *BI = cast<BranchInst>(I);
1623 MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
1624 MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
1625
1626 // Fold the common case of a conditional branch with a comparison
1627 // in the same block (values defined on other blocks may not have
1628 // initialized registers).
1629 X86::CondCode CC;
1630 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1631 if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
1632 EVT VT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1633
1634 // Try to optimize or fold the cmp.
1635 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1636 switch (Predicate) {
1637 default: break;
1638 case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true;
1639 case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true;
1640 }
1641
1642 const Value *CmpLHS = CI->getOperand(0);
1643 const Value *CmpRHS = CI->getOperand(1);
1644
1645 // The optimizer might have replaced fcmp oeq %x, %x with
1646 // fcmp ord %x, 0.0.
1647 // We don't have to materialize a zero constant for this case and can just
1648 // use %x again on the RHS.
1649 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
1650 const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
1651 if (CmpRHSC && CmpRHSC->isNullValue())
1652 CmpRHS = CmpLHS;
1653 }
1654
1655 // Try to take advantage of fallthrough opportunities.
1656 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1657 std::swap(TrueMBB, FalseMBB);
1658 Predicate = CmpInst::getInversePredicate(Predicate);
1659 }
1660
1661 // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
1662 // code check. Instead two branch instructions are required to check all
1663 // the flags. First we change the predicate to a supported condition code,
1664 // which will be used by the first branch. Later on we will emit the second
1665 // branch.
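// A rough sketch for "br (fcmp une float %a, %b)" (labels made up):
//   ucomiss %b, %a
//   jne .LBB_true      ; ordered and not equal
//   jp  .LBB_true      ; unordered (NaN) also counts as "not equal"
//   jmp .LBB_false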
1666 bool NeedExtraBranch = false;
1667 switch (Predicate) {
1668 default: break;
1669 case CmpInst::FCMP_OEQ:
1670 std::swap(TrueMBB, FalseMBB);
1671 LLVM_FALLTHROUGH;
1672 case CmpInst::FCMP_UNE:
1673 NeedExtraBranch = true;
1674 Predicate = CmpInst::FCMP_ONE;
1675 break;
1676 }
1677
1678 bool SwapArgs;
1679 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
1680 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1681
1682 if (SwapArgs)
1683 std::swap(CmpLHS, CmpRHS);
1684
1685 // Emit a compare of the LHS and RHS, setting the flags.
1686 if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))
1687 return false;
1688
1689 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1))
1690 .addMBB(TrueMBB).addImm(CC);
1691
1692 // X86 requires a second branch to handle UNE (and OEQ, which is mapped
1693 // to UNE above).
1694 if (NeedExtraBranch) {
1695 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1))
1696 .addMBB(TrueMBB).addImm(X86::COND_P);
1697 }
1698
1699 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1700 return true;
1701 }
1702 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1703 // Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which
1704 // typically happen for _Bool and C++ bools.
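// E.g. "%c = trunc i32 %x to i1 ; br i1 %c, ..." is lowered below to
// roughly "testl $1, %x ; jne <true-block>".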
1705 MVT SourceVT;
1706 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1707 isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
1708 unsigned TestOpc = 0;
1709 switch (SourceVT.SimpleTy) {
1710 default: break;
1711 case MVT::i8: TestOpc = X86::TEST8ri; break;
1712 case MVT::i16: TestOpc = X86::TEST16ri; break;
1713 case MVT::i32: TestOpc = X86::TEST32ri; break;
1714 case MVT::i64: TestOpc = X86::TEST64ri32; break;
1715 }
1716 if (TestOpc) {
1717 Register OpReg = getRegForValue(TI->getOperand(0));
1718 if (OpReg == 0) return false;
1719
1720 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
1721 .addReg(OpReg).addImm(1);
1722
1723 unsigned JmpCond = X86::COND_NE;
1724 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1725 std::swap(TrueMBB, FalseMBB);
1726 JmpCond = X86::COND_E;
1727 }
1728
1729 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1))
1730 .addMBB(TrueMBB).addImm(JmpCond);
1731
1732 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1733 return true;
1734 }
1735 }
1736 } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {
1737 // Fake-request the condition; otherwise the intrinsic might be optimized
1738 // away completely.
1739 Register TmpReg = getRegForValue(BI->getCondition());
1740 if (TmpReg == 0)
1741 return false;
1742
1743 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1))
1744 .addMBB(TrueMBB).addImm(CC);
1745 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1746 return true;
1747 }
1748
1749 // Otherwise do a clumsy setcc and re-test it.
1750 // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
1751 // in an explicit cast, so make sure to handle that correctly.
1752 Register OpReg = getRegForValue(BI->getCondition());
1753 if (OpReg == 0) return false;
1754
1755 // In case OpReg is a K register, COPY to a GPR
1756 if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) {
1757 unsigned KOpReg = OpReg;
1758 OpReg = createResultReg(&X86::GR32RegClass);
1759 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1760 TII.get(TargetOpcode::COPY), OpReg)
1761 .addReg(KOpReg);
1762 OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, /*Op0IsKill=*/true,
1763 X86::sub_8bit);
1764 }
1765 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
1766 .addReg(OpReg)
1767 .addImm(1);
1768 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1))
1769 .addMBB(TrueMBB).addImm(X86::COND_NE);
1770 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1771 return true;
1772 }
1773
1774 bool X86FastISel::X86SelectShift(const Instruction *I) {
1775 unsigned CReg = 0, OpReg = 0;
1776 const TargetRegisterClass *RC = nullptr;
1777 if (I->getType()->isIntegerTy(8)) {
1778 CReg = X86::CL;
1779 RC = &X86::GR8RegClass;
1780 switch (I->getOpcode()) {
1781 case Instruction::LShr: OpReg = X86::SHR8rCL; break;
1782 case Instruction::AShr: OpReg = X86::SAR8rCL; break;
1783 case Instruction::Shl: OpReg = X86::SHL8rCL; break;
1784 default: return false;
1785 }
1786 } else if (I->getType()->isIntegerTy(16)) {
1787 CReg = X86::CX;
1788 RC = &X86::GR16RegClass;
1789 switch (I->getOpcode()) {
1790 default: llvm_unreachable("Unexpected shift opcode");
1791 case Instruction::LShr: OpReg = X86::SHR16rCL; break;
1792 case Instruction::AShr: OpReg = X86::SAR16rCL; break;
1793 case Instruction::Shl: OpReg = X86::SHL16rCL; break;
1794 }
1795 } else if (I->getType()->isIntegerTy(32)) {
1796 CReg = X86::ECX;
1797 RC = &X86::GR32RegClass;
1798 switch (I->getOpcode()) {
1799 default: llvm_unreachable("Unexpected shift opcode");
1800 case Instruction::LShr: OpReg = X86::SHR32rCL; break;
1801 case Instruction::AShr: OpReg = X86::SAR32rCL; break;
1802 case Instruction::Shl: OpReg = X86::SHL32rCL; break;
1803 }
1804 } else if (I->getType()->isIntegerTy(64)) {
1805 CReg = X86::RCX;
1806 RC = &X86::GR64RegClass;
1807 switch (I->getOpcode()) {
1808 default: llvm_unreachable("Unexpected shift opcode");
1809 case Instruction::LShr: OpReg = X86::SHR64rCL; break;
1810 case Instruction::AShr: OpReg = X86::SAR64rCL; break;
1811 case Instruction::Shl: OpReg = X86::SHL64rCL; break;
1812 }
1813 } else {
1814 return false;
1815 }
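// A sketch of the sequence emitted below for a 32-bit shift (names made up):
//   movl %amt, %ecx           ; COPY into the fixed count register
//   shll %cl, %val -> %dst    ; the *rCL opcodes read the amount from CL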
1816
1817 MVT VT;
1818 if (!isTypeLegal(I->getType(), VT))
1819 return false;
1820
1821 Register Op0Reg = getRegForValue(I->getOperand(0));
1822 if (Op0Reg == 0) return false;
1823
1824 Register Op1Reg = getRegForValue(I->getOperand(1));
1825 if (Op1Reg == 0) return false;
1826 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
1827 CReg).addReg(Op1Reg);
1828
1829 // The shift instruction uses X86::CL. If we defined a super-register
1830 // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
1831 if (CReg != X86::CL)
1832 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1833 TII.get(TargetOpcode::KILL), X86::CL)
1834 .addReg(CReg, RegState::Kill);
1835
1836 Register ResultReg = createResultReg(RC);
1837 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)
1838 .addReg(Op0Reg);
1839 updateValueMap(I, ResultReg);
1840 return true;
1841 }
1842
1843 bool X86FastISel::X86SelectDivRem(const Instruction *I) {
1844 const static unsigned NumTypes = 4; // i8, i16, i32, i64
1845 const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem
1846 const static bool S = true; // IsSigned
1847 const static bool U = false; // !IsSigned
1848 const static unsigned Copy = TargetOpcode::COPY;
1849 // For the X86 DIV/IDIV instruction, in most cases the dividend
1850 // (numerator) must be in a specific register pair highreg:lowreg,
1851 // producing the quotient in lowreg and the remainder in highreg.
1852 // For most data types, to set up the instruction, the dividend is
1853 // copied into lowreg, and lowreg is sign-extended or zero-extended
1854 // into highreg. The exception is i8, where the dividend is defined
1855 // as a single register rather than a register pair, and we
1856 // therefore directly sign-extend or zero-extend the dividend into
1857 // lowreg, instead of copying, and ignore the highreg.
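// For example, "sdiv i32 %a, %b" is built below roughly as:
//   movl %a, %eax          ; OpCopy
//   cdq                    ; OpSignExtend: sign-extend EAX into EDX
//   idivl %b               ; OpDivRem
//   movl %eax, %dst        ; quotient; EDX would hold the remainder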
1858 const static struct DivRemEntry {
1859 // The following portion depends only on the data type.
1860 const TargetRegisterClass *RC;
1861 unsigned LowInReg; // low part of the register pair
1862 unsigned HighInReg; // high part of the register pair
1863 // The following portion depends on both the data type and the operation.
1864 struct DivRemResult {
1865 unsigned OpDivRem; // The specific DIV/IDIV opcode to use.
1866 unsigned OpSignExtend; // Opcode for sign-extending lowreg into
1867 // highreg, or copying a zero into highreg.
1868 unsigned OpCopy; // Opcode for copying dividend into lowreg, or
1869 // zero/sign-extending into lowreg for i8.
1870 unsigned DivRemResultReg; // Register containing the desired result.
1871 bool IsOpSigned; // Whether to use signed or unsigned form.
1872 } ResultTable[NumOps];
1873 } OpTable[NumTypes] = {
1874 { &X86::GR8RegClass, X86::AX, 0, {
1875 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv
1876 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem
1877 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv
1878 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem
1879 }
1880 }, // i8
1881 { &X86::GR16RegClass, X86::AX, X86::DX, {
1882 { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv
1883 { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem
1884 { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv
1885 { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem
1886 }
1887 }, // i16
1888 { &X86::GR32RegClass, X86::EAX, X86::EDX, {
1889 { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv
1890 { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem
1891 { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv
1892 { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem
1893 }
1894 }, // i32
1895 { &X86::GR64RegClass, X86::RAX, X86::RDX, {
1896 { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv
1897 { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem
1898 { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv
1899 { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem
1900 }
1901 }, // i64
1902 };
1903
1904 MVT VT;
1905 if (!isTypeLegal(I->getType(), VT))
1906 return false;
1907
1908 unsigned TypeIndex, OpIndex;
1909 switch (VT.SimpleTy) {
1910 default: return false;
1911 case MVT::i8: TypeIndex = 0; break;
1912 case MVT::i16: TypeIndex = 1; break;
1913 case MVT::i32: TypeIndex = 2; break;
1914 case MVT::i64: TypeIndex = 3;
1915 if (!Subtarget->is64Bit())
1916 return false;
1917 break;
1918 }
1919
1920 switch (I->getOpcode()) {
1921 default: llvm_unreachable("Unexpected div/rem opcode");
1922 case Instruction::SDiv: OpIndex = 0; break;
1923 case Instruction::SRem: OpIndex = 1; break;
1924 case Instruction::UDiv: OpIndex = 2; break;
1925 case Instruction::URem: OpIndex = 3; break;
1926 }
1927
1928 const DivRemEntry &TypeEntry = OpTable[TypeIndex];
1929 const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
1930 Register Op0Reg = getRegForValue(I->getOperand(0));
1931 if (Op0Reg == 0)
1932 return false;
1933 Register Op1Reg = getRegForValue(I->getOperand(1));
1934 if (Op1Reg == 0)
1935 return false;
1936
1937 // Move op0 into low-order input register.
1938 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1939 TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
1940 // Zero-extend or sign-extend into high-order input register.
1941 if (OpEntry.OpSignExtend) {
1942 if (OpEntry.IsOpSigned)
1943 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1944 TII.get(OpEntry.OpSignExtend));
1945 else {
1946 Register Zero32 = createResultReg(&X86::GR32RegClass);
1947 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1948 TII.get(X86::MOV32r0), Zero32);
1949
1950 // Copy the zero into the appropriate sub/super/identical physical
1951 // register. Unfortunately the operations needed are not uniform enough
1952 // to fit neatly into the table above.
1953 if (VT == MVT::i16) {
1954 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1955 TII.get(Copy), TypeEntry.HighInReg)
1956 .addReg(Zero32, 0, X86::sub_16bit);
1957 } else if (VT == MVT::i32) {
1958 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1959 TII.get(Copy), TypeEntry.HighInReg)
1960 .addReg(Zero32);
1961 } else if (VT == MVT::i64) {
1962 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1963 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1964 .addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
1965 }
1966 }
1967 }
1968 // Generate the DIV/IDIV instruction.
1969 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1970 TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
1971 // For i8 remainder, we can't reference AH directly, as we'll end
1972 // up with bogus copies like %r9b = COPY %ah. Reference AX
1973 // instead to prevent AH references in a REX instruction.
1974 //
1975 // The current assumption of the fast register allocator is that isel
1976 // won't generate explicit references to the GR8_NOREX registers. If
1977 // the allocator and/or the backend get enhanced to be more robust in
1978 // that regard, this can be, and should be, removed.
1979 unsigned ResultReg = 0;
1980 if ((I->getOpcode() == Instruction::SRem ||
1981 I->getOpcode() == Instruction::URem) &&
1982 OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
1983 Register SourceSuperReg = createResultReg(&X86::GR16RegClass);
1984 Register ResultSuperReg = createResultReg(&X86::GR16RegClass);
1985 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1986 TII.get(Copy), SourceSuperReg).addReg(X86::AX);
1987
1988 // Shift AX right by 8 bits instead of using AH.
1989 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri),
1990 ResultSuperReg).addReg(SourceSuperReg).addImm(8);
1991
1992 // Now reference the 8-bit subreg of the result.
1993 ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
1994 /*Op0IsKill=*/true, X86::sub_8bit);
1995 }
1996 // Copy the result out of the physreg if we haven't already.
1997 if (!ResultReg) {
1998 ResultReg = createResultReg(TypeEntry.RC);
1999 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)
2000 .addReg(OpEntry.DivRemResultReg);
2001 }
2002 updateValueMap(I, ResultReg);
2003
2004 return true;
2005 }
2006
2007 /// Emit a conditional move instruction (if they are supported) to lower
2008 /// the select.
2009 bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
2010 // Check if the subtarget supports these instructions.
2011 if (!Subtarget->hasCMov())
2012 return false;
2013
2014 // FIXME: Add support for i8.
2015 if (RetVT < MVT::i16 || RetVT > MVT::i64)
2016 return false;
2017
2018 const Value *Cond = I->getOperand(0);
2019 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2020 bool NeedTest = true;
2021 X86::CondCode CC = X86::COND_NE;
2022
2023 // Optimize conditions coming from a compare if both instructions are in the
2024 // same basic block (values defined in other basic blocks may not have
2025 // initialized registers).
2026 const auto *CI = dyn_cast<CmpInst>(Cond);
2027 if (CI && (CI->getParent() == I->getParent())) {
2028 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2029
2030 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
2031 static const uint16_t SETFOpcTable[2][3] = {
2032 { X86::COND_NP, X86::COND_E, X86::TEST8rr },
2033 { X86::COND_P, X86::COND_NE, X86::OR8rr }
2034 };
2035 const uint16_t *SETFOpc = nullptr;
2036 switch (Predicate) {
2037 default: break;
2038 case CmpInst::FCMP_OEQ:
2039 SETFOpc = &SETFOpcTable[0][0];
2040 Predicate = CmpInst::ICMP_NE;
2041 break;
2042 case CmpInst::FCMP_UNE:
2043 SETFOpc = &SETFOpcTable[1][0];
2044 Predicate = CmpInst::ICMP_NE;
2045 break;
2046 }
2047
2048 bool NeedSwap;
2049 std::tie(CC, NeedSwap) = X86::getX86ConditionCode(Predicate);
2050 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
2051
2052 const Value *CmpLHS = CI->getOperand(0);
2053 const Value *CmpRHS = CI->getOperand(1);
2054 if (NeedSwap)
2055 std::swap(CmpLHS, CmpRHS);
2056
2057 EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
2058 // Emit a compare of the LHS and RHS, setting the flags.
2059 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
2060 return false;
2061
2062 if (SETFOpc) {
2063 Register FlagReg1 = createResultReg(&X86::GR8RegClass);
2064 Register FlagReg2 = createResultReg(&X86::GR8RegClass);
2065 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
2066 FlagReg1).addImm(SETFOpc[0]);
2067 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
2068 FlagReg2).addImm(SETFOpc[1]);
2069 auto const &II = TII.get(SETFOpc[2]);
2070 if (II.getNumDefs()) {
2071 Register TmpReg = createResultReg(&X86::GR8RegClass);
2072 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)
2073 .addReg(FlagReg2).addReg(FlagReg1);
2074 } else {
2075 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2076 .addReg(FlagReg2).addReg(FlagReg1);
2077 }
2078 }
2079 NeedTest = false;
2080 } else if (foldX86XALUIntrinsic(CC, I, Cond)) {
2081 // Fake-request the condition; otherwise the intrinsic might be optimized
2082 // away completely.
2083 Register TmpReg = getRegForValue(Cond);
2084 if (TmpReg == 0)
2085 return false;
2086
2087 NeedTest = false;
2088 }
2089
2090 if (NeedTest) {
2091 // Selects operate on i1; however, CondReg is 8 bits wide and may contain
2092 // garbage. Only the least significant bit is supposed to be accurate, so
2093 // if we read more than the LSB we may see a non-zero value even though the
2094 // LSB is zero. Therefore, we have to truncate CondReg to i1 for the
2095 // select. This is achieved by testing CondReg against 1.
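// A minimal sketch of what "select i1 %c, i32 %a, i32 %b" becomes here:
//   testb $1, %c            ; ZF = (lsb of %c) == 0
//   cmovnel %a, %b -> %dst  ; %dst = %c ? %a : %b (CMOV emitted below)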
2096 Register CondReg = getRegForValue(Cond);
2097 if (CondReg == 0)
2098 return false;
2099 bool CondIsKill = hasTrivialKill(Cond);
2100
2101 // In case CondReg is a K register, COPY to a GPR.
2102 if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
2103 unsigned KCondReg = CondReg;
2104 CondReg = createResultReg(&X86::GR32RegClass);
2105 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2106 TII.get(TargetOpcode::COPY), CondReg)
2107 .addReg(KCondReg, getKillRegState(CondIsKill));
2108 CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Op0IsKill=*/true,
2109 X86::sub_8bit);
2110 }
2111 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
2112 .addReg(CondReg, getKillRegState(CondIsKill))
2113 .addImm(1);
2114 }
2115
2116 const Value *LHS = I->getOperand(1);
2117 const Value *RHS = I->getOperand(2);
2118
2119 Register RHSReg = getRegForValue(RHS);
2120 bool RHSIsKill = hasTrivialKill(RHS);
2121
2122 Register LHSReg = getRegForValue(LHS);
2123 bool LHSIsKill = hasTrivialKill(LHS);
2124
2125 if (!LHSReg || !RHSReg)
2126 return false;
2127
2128 const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
2129 unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC)/8);
2130 Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill,
2131 LHSReg, LHSIsKill, CC);
2132 updateValueMap(I, ResultReg);
2133 return true;
2134 }
2135
2136 /// Emit SSE or AVX instructions to lower the select.
2137 ///
2138 /// Try to use SSE1/SSE2 instructions to simulate a select without branches.
2139 /// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
2140 /// SSE instructions are available. If AVX is available, try to use a VBLENDV.
2141 bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
2142 // Optimize conditions coming from a compare if both instructions are in the
2143 // same basic block (values defined in other basic blocks may not have
2144 // initialized registers).
2145 const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
2146 if (!CI || (CI->getParent() != I->getParent()))
2147 return false;
2148
2149 if (I->getType() != CI->getOperand(0)->getType() ||
2150 !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
2151 (Subtarget->hasSSE2() && RetVT == MVT::f64)))
2152 return false;
2153
2154 const Value *CmpLHS = CI->getOperand(0);
2155 const Value *CmpRHS = CI->getOperand(1);
2156 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2157
2158 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
2159 // We don't have to materialize a zero constant for this case and can just use
2160 // %x again on the RHS.
2161 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
2162 const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
2163 if (CmpRHSC && CmpRHSC->isNullValue())
2164 CmpRHS = CmpLHS;
2165 }
2166
2167 unsigned CC;
2168 bool NeedSwap;
2169 std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
2170 if (CC > 7 && !Subtarget->hasAVX())
2171 return false;
2172
2173 if (NeedSwap)
2174 std::swap(CmpLHS, CmpRHS);
2175
2176 const Value *LHS = I->getOperand(1);
2177 const Value *RHS = I->getOperand(2);
2178
2179 Register LHSReg = getRegForValue(LHS);
2180 bool LHSIsKill = hasTrivialKill(LHS);
2181
2182 Register RHSReg = getRegForValue(RHS);
2183 bool RHSIsKill = hasTrivialKill(RHS);
2184
2185 Register CmpLHSReg = getRegForValue(CmpLHS);
2186 bool CmpLHSIsKill = hasTrivialKill(CmpLHS);
2187
2188 Register CmpRHSReg = getRegForValue(CmpRHS);
2189 bool CmpRHSIsKill = hasTrivialKill(CmpRHS);
2190
2191 if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
2192 return false;
2193
2194 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2195 unsigned ResultReg;
2196
2197 if (Subtarget->hasAVX512()) {
2198 // If we have AVX512 we can use a mask compare and masked movss/sd.
2199 const TargetRegisterClass *VR128X = &X86::VR128XRegClass;
2200 const TargetRegisterClass *VK1 = &X86::VK1RegClass;
2201
2202 unsigned CmpOpcode =
2203 (RetVT == MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr;
2204 Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpLHSIsKill,
2205 CmpRHSReg, CmpRHSIsKill, CC);
2206
2207 // Need an IMPLICIT_DEF for the input that is used to generate the upper
2208 // bits of the result register, since it's not based on any of the inputs.
2209 Register ImplicitDefReg = createResultReg(VR128X);
2210 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2211 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2212
2213 // Place RHSReg in the passthru operand of the masked movss/sd and put
2214 // LHSReg in the input. The mask input comes from the compare.
2215 unsigned MovOpcode =
2216 (RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk;
2217 unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, RHSIsKill,
2218 CmpReg, true, ImplicitDefReg, true,
2219 LHSReg, LHSIsKill);
2220
2221 ResultReg = createResultReg(RC);
2222 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2223 TII.get(TargetOpcode::COPY), ResultReg).addReg(MovReg);
2224
2225 } else if (Subtarget->hasAVX()) {
2226 const TargetRegisterClass *VR128 = &X86::VR128RegClass;
2227
2228 // If we have AVX, create 1 blendv instead of 3 logic instructions.
2229 // Blendv was introduced with SSE 4.1, but the 2 register form implicitly
2230 // uses XMM0 as the selection register. That may need just as many
2231 // instructions as the AND/ANDN/OR sequence due to register moves, so
2232 // don't bother.
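// A rough AT&T-syntax sketch of the AVX sequence built below for f32
// (register names made up):
//   vcmpss $cc, %cmprhs, %cmplhs, %mask
//   vblendvps %mask, %lhs, %rhs, %dst   ; %dst = %mask ? %lhs : %rhs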
2233 unsigned CmpOpcode =
2234 (RetVT == MVT::f32) ? X86::VCMPSSrr : X86::VCMPSDrr;
2235 unsigned BlendOpcode =
2236 (RetVT == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;
2237
2238 Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpLHSIsKill,
2239 CmpRHSReg, CmpRHSIsKill, CC);
2240 Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill,
2241 LHSReg, LHSIsKill, CmpReg, true);
2242 ResultReg = createResultReg(RC);
2243 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2244 TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
2245 } else {
2246 // Choose the SSE instruction sequence based on data type (float or double).
2247 static const uint16_t OpcTable[2][4] = {
2248 { X86::CMPSSrr, X86::ANDPSrr, X86::ANDNPSrr, X86::ORPSrr },
2249 { X86::CMPSDrr, X86::ANDPDrr, X86::ANDNPDrr, X86::ORPDrr }
2250 };
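// This is the classic branchless FP select: the compare yields an all-ones
// or all-zeros %mask, so roughly
//   %t   = andps  %mask, %lhs
//   %f   = andnps %mask, %rhs    ; ~%mask & %rhs
//   %dst = orps   %t, %f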
2251
2252 const uint16_t *Opc = nullptr;
2253 switch (RetVT.SimpleTy) {
2254 default: return false;
2255 case MVT::f32: Opc = &OpcTable[0][0]; break;
2256 case MVT::f64: Opc = &OpcTable[1][0]; break;
2257 }
2258
2259 const TargetRegisterClass *VR128 = &X86::VR128RegClass;
2260 Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
2261 CmpRHSReg, CmpRHSIsKill, CC);
2262 Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg,
2263 /*Op0IsKill=*/false, LHSReg, LHSIsKill);
2264 Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg,
2265 /*Op0IsKill=*/true, RHSReg, RHSIsKill);
2266 Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, /*Op0IsKill=*/true,
2267 AndReg, /*Op1IsKill=*/true);
2268 ResultReg = createResultReg(RC);
2269 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2270 TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);
2271 }
2272 updateValueMap(I, ResultReg);
2273 return true;
2274 }
2275
2276 bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
2277 // These are pseudo CMOV instructions and will later be expanded into
2278 // control flow.
2279 unsigned Opc;
2280 switch (RetVT.SimpleTy) {
2281 default: return false;
2282 case MVT::i8: Opc = X86::CMOV_GR8; break;
2283 case MVT::i16: Opc = X86::CMOV_GR16; break;
2284 case MVT::i32: Opc = X86::CMOV_GR32; break;
2285 case MVT::f32: Opc = Subtarget->hasAVX512() ? X86::CMOV_FR32X
2286 : X86::CMOV_FR32; break;
2287 case MVT::f64: Opc = Subtarget->hasAVX512() ? X86::CMOV_FR64X
2288 : X86::CMOV_FR64; break;
2289 }
2290
2291 const Value *Cond = I->getOperand(0);
2292 X86::CondCode CC = X86::COND_NE;
2293
2294 // Optimize conditions coming from a compare if both instructions are in the
2295 // same basic block (values defined in other basic blocks may not have
2296 // initialized registers).
2297 const auto *CI = dyn_cast<CmpInst>(Cond);
2298 if (CI && (CI->getParent() == I->getParent())) {
2299 bool NeedSwap;
2300 std::tie(CC, NeedSwap) = X86::getX86ConditionCode(CI->getPredicate());
2301 if (CC > X86::LAST_VALID_COND)
2302 return false;
2303
2304 const Value *CmpLHS = CI->getOperand(0);
2305 const Value *CmpRHS = CI->getOperand(1);
2306
2307 if (NeedSwap)
2308 std::swap(CmpLHS, CmpRHS);
2309
2310 EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
2311 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
2312 return false;
2313 } else {
2314 Register CondReg = getRegForValue(Cond);
2315 if (CondReg == 0)
2316 return false;
2317 bool CondIsKill = hasTrivialKill(Cond);
2318
2319 // In case CondReg is a K register, COPY to a GPR.
2320 if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
2321 unsigned KCondReg = CondReg;
2322 CondReg = createResultReg(&X86::GR32RegClass);
2323 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2324 TII.get(TargetOpcode::COPY), CondReg)
2325 .addReg(KCondReg, getKillRegState(CondIsKill));
2326 CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Op0IsKill=*/true,
2327 X86::sub_8bit);
2328 }
2329 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
2330 .addReg(CondReg, getKillRegState(CondIsKill))
2331 .addImm(1);
2332 }
2333
2334 const Value *LHS = I->getOperand(1);
2335 const Value *RHS = I->getOperand(2);
2336
2337 Register LHSReg = getRegForValue(LHS);
2338 bool LHSIsKill = hasTrivialKill(LHS);
2339
2340 Register RHSReg = getRegForValue(RHS);
2341 bool RHSIsKill = hasTrivialKill(RHS);
2342
2343 if (!LHSReg || !RHSReg)
2344 return false;
2345
2346 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2347
2348 Register ResultReg =
2349 fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
2350 updateValueMap(I, ResultReg);
2351 return true;
2352 }
2353
2354 bool X86FastISel::X86SelectSelect(const Instruction *I) {
2355 MVT RetVT;
2356 if (!isTypeLegal(I->getType(), RetVT))
2357 return false;
2358
2359 // Check if we can fold the select.
2360 if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {
2361 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2362 const Value *Opnd = nullptr;
2363 switch (Predicate) {
2364 default: break;
2365 case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;
2366 case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break;
2367 }
2368 // No need for a select anymore - this is an unconditional move.
2369 if (Opnd) {
2370 Register OpReg = getRegForValue(Opnd);
2371 if (OpReg == 0)
2372 return false;
2373 bool OpIsKill = hasTrivialKill(Opnd);
2374 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2375 Register ResultReg = createResultReg(RC);
2376 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2377 TII.get(TargetOpcode::COPY), ResultReg)
2378 .addReg(OpReg, getKillRegState(OpIsKill));
2379 updateValueMap(I, ResultReg);
2380 return true;
2381 }
2382 }
2383
2384 // First try to use real conditional move instructions.
2385 if (X86FastEmitCMoveSelect(RetVT, I))
2386 return true;
2387
2388 // Try to use a sequence of SSE instructions to simulate a conditional move.
2389 if (X86FastEmitSSESelect(RetVT, I))
2390 return true;
2391
2392 // Fall-back to pseudo conditional move instructions, which will be later
2393 // converted to control-flow.
2394 if (X86FastEmitPseudoSelect(RetVT, I))
2395 return true;
2396
2397 return false;
2398 }
2399
2400 // Common code for X86SelectSIToFP and X86SelectUIToFP.
2401 bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {
2402 // The target-independent selection algorithm in FastISel already knows how
2403 // to select a SINT_TO_FP if the target is SSE but not AVX.
2404 // Early exit if the subtarget doesn't have AVX.
2405 // Unsigned conversion requires AVX512.
2406 bool HasAVX512 = Subtarget->hasAVX512();
2407 if (!Subtarget->hasAVX() || (!IsSigned && !HasAVX512))
2408 return false;
2409
2410 // TODO: We could sign extend narrower types.
2411 MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
2412 if (SrcVT != MVT::i32 && SrcVT != MVT::i64)
2413 return false;
2414
2415 // Select integer to float/double conversion.
2416 Register OpReg = getRegForValue(I->getOperand(0));
2417 if (OpReg == 0)
2418 return false;
2419
2420 unsigned Opcode;
2421
2422 static const uint16_t SCvtOpc[2][2][2] = {
2423 { { X86::VCVTSI2SSrr, X86::VCVTSI642SSrr },
2424 { X86::VCVTSI2SDrr, X86::VCVTSI642SDrr } },
2425 { { X86::VCVTSI2SSZrr, X86::VCVTSI642SSZrr },
2426 { X86::VCVTSI2SDZrr, X86::VCVTSI642SDZrr } },
2427 };
2428 static const uint16_t UCvtOpc[2][2] = {
2429 { X86::VCVTUSI2SSZrr, X86::VCVTUSI642SSZrr },
2430 { X86::VCVTUSI2SDZrr, X86::VCVTUSI642SDZrr },
2431 };
2432 bool Is64Bit = SrcVT == MVT::i64;
2433
2434 if (I->getType()->isDoubleTy()) {
2435 // s/uitofp int -> double
2436 Opcode = IsSigned ? SCvtOpc[HasAVX512][1][Is64Bit] : UCvtOpc[1][Is64Bit];
2437 } else if (I->getType()->isFloatTy()) {
2438 // s/uitofp int -> float
2439 Opcode = IsSigned ? SCvtOpc[HasAVX512][0][Is64Bit] : UCvtOpc[0][Is64Bit];
2440 } else
2441 return false;
2442
2443 MVT DstVT = TLI.getValueType(DL, I->getType()).getSimpleVT();
2444 const TargetRegisterClass *RC = TLI.getRegClassFor(DstVT);
2445 Register ImplicitDefReg = createResultReg(RC);
2446 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2447 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2448 Register ResultReg =
2449 fastEmitInst_rr(Opcode, RC, ImplicitDefReg, true, OpReg, false);
2450 updateValueMap(I, ResultReg);
2451 return true;
2452 }
2453
2454 bool X86FastISel::X86SelectSIToFP(const Instruction *I) {
2455 return X86SelectIntToFP(I, /*IsSigned*/true);
2456 }
2457
2458 bool X86FastISel::X86SelectUIToFP(const Instruction *I) {
2459 return X86SelectIntToFP(I, /*IsSigned*/false);
2460 }
2461
2462 // Helper method used by X86SelectFPExt and X86SelectFPTrunc.
2463 bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
2464 unsigned TargetOpc,
2465 const TargetRegisterClass *RC) {
2466 assert((I->getOpcode() == Instruction::FPExt ||
2467 I->getOpcode() == Instruction::FPTrunc) &&
2468 "Instruction must be an FPExt or FPTrunc!");
2469 bool HasAVX = Subtarget->hasAVX();
2470
2471 Register OpReg = getRegForValue(I->getOperand(0));
2472 if (OpReg == 0)
2473 return false;
2474
2475 unsigned ImplicitDefReg = 0;
2476 if (HasAVX) {
2477 ImplicitDefReg = createResultReg(RC);
2478 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2479 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2480
2481 }
2482
2483 Register ResultReg = createResultReg(RC);
2484 MachineInstrBuilder MIB;
2485 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc),
2486 ResultReg);
2487
2488 if (HasAVX)
2489 MIB.addReg(ImplicitDefReg);
2490
2491 MIB.addReg(OpReg);
2492 updateValueMap(I, ResultReg);
2493 return true;
2494 }
2495
2496 bool X86FastISel::X86SelectFPExt(const Instruction *I) {
2497 if (X86ScalarSSEf64 && I->getType()->isDoubleTy() &&
2498 I->getOperand(0)->getType()->isFloatTy()) {
2499 bool HasAVX512 = Subtarget->hasAVX512();
2500 // fpext from float to double.
2501 unsigned Opc =
2502 HasAVX512 ? X86::VCVTSS2SDZrr
2503 : Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
2504 return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f64));
2505 }
2506
2507 return false;
2508 }
2509
2510 bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
2511 if (X86ScalarSSEf64 && I->getType()->isFloatTy() &&
2512 I->getOperand(0)->getType()->isDoubleTy()) {
2513 bool HasAVX512 = Subtarget->hasAVX512();
2514 // fptrunc from double to float.
2515 unsigned Opc =
2516 HasAVX512 ? X86::VCVTSD2SSZrr
2517 : Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
2518 return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f32));
2519 }
2520
2521 return false;
2522 }
2523
2524 bool X86FastISel::X86SelectTrunc(const Instruction *I) {
2525 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
2526 EVT DstVT = TLI.getValueType(DL, I->getType());
2527
2528 // This code only handles truncation to byte.
2529 if (DstVT != MVT::i8 && DstVT != MVT::i1)
2530 return false;
2531 if (!TLI.isTypeLegal(SrcVT))
2532 return false;
2533
2534 Register InputReg = getRegForValue(I->getOperand(0));
2535 if (!InputReg)
2536 // Unhandled operand. Halt "fast" selection and bail.
2537 return false;
2538
2539 if (SrcVT == MVT::i8) {
2540 // Truncate from i8 to i1; no code needed.
2541 updateValueMap(I, InputReg);
2542 return true;
2543 }
2544
2545 // Issue an extract_subreg.
2546 Register ResultReg = fastEmitInst_extractsubreg(MVT::i8,
2547 InputReg, false,
2548 X86::sub_8bit);
2549 if (!ResultReg)
2550 return false;
2551
2552 updateValueMap(I, ResultReg);
2553 return true;
2554 }
2555
2556 bool X86FastISel::IsMemcpySmall(uint64_t Len) {
2557 return Len <= (Subtarget->is64Bit() ? 32 : 16);
2558 }
2559
2560 bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
2561 X86AddressMode SrcAM, uint64_t Len) {
2562
2563 // Make sure we don't bloat code by inlining very large memcpy's.
2564 if (!IsMemcpySmall(Len))
2565 return false;
2566
2567 bool i64Legal = Subtarget->is64Bit();
2568
2569 // We don't care about alignment here since we just emit integer accesses.
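// E.g. a 15-byte copy on x86-64 is emitted as one i64, one i32, one i16 and
// one i8 load/store pair (8 + 4 + 2 + 1 bytes).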
2570 while (Len) {
2571 MVT VT;
2572 if (Len >= 8 && i64Legal)
2573 VT = MVT::i64;
2574 else if (Len >= 4)
2575 VT = MVT::i32;
2576 else if (Len >= 2)
2577 VT = MVT::i16;
2578 else
2579 VT = MVT::i8;
2580
2581 unsigned Reg;
2582 bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
2583 RV &= X86FastEmitStore(VT, Reg, /*ValIsKill=*/true, DestAM);
2584 assert(RV && "Failed to emit load or store??");
2585
2586 unsigned Size = VT.getSizeInBits()/8;
2587 Len -= Size;
2588 DestAM.Disp += Size;
2589 SrcAM.Disp += Size;
2590 }
2591
2592 return true;
2593 }
2594
2595 bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
2596 // FIXME: Handle more intrinsics.
2597 switch (II->getIntrinsicID()) {
2598 default: return false;
2599 case Intrinsic::convert_from_fp16:
2600 case Intrinsic::convert_to_fp16: {
2601 if (Subtarget->useSoftFloat() || !Subtarget->hasF16C())
2602 return false;
2603
2604 const Value *Op = II->getArgOperand(0);
2605 Register InputReg = getRegForValue(Op);
2606 if (InputReg == 0)
2607 return false;
2608
2609 // F16C only allows converting from float to half and from half to float.
2610 bool IsFloatToHalf = II->getIntrinsicID() == Intrinsic::convert_to_fp16;
2611 if (IsFloatToHalf) {
2612 if (!Op->getType()->isFloatTy())
2613 return false;
2614 } else {
2615 if (!II->getType()->isFloatTy())
2616 return false;
2617 }
2618
2619 unsigned ResultReg = 0;
2620 const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::v8i16);
2621 if (IsFloatToHalf) {
2622 // 'InputReg' is implicitly promoted from register class FR32 to
2623 // register class VR128 by method 'constrainOperandRegClass' which is
2624 // directly called by 'fastEmitInst_ri'.
2625 // Instruction VCVTPS2PHrr takes an extra immediate operand which is
2626 // used to provide rounding control: use MXCSR.RC, encoded as 0b100.
2627 // It's consistent with the other FP instructions, which are usually
2628 // controlled by MXCSR.
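// A sketch of the float -> half sequence built below (names made up):
//   vcvtps2ph $4, %xmm_in, %xmm_tmp    ; round according to MXCSR.RC
//   vmovd %xmm_tmp, %tmp32             ; VMOVPDI2DIrr: low 32 bits to a GPR
//   %dst16 = EXTRACT_SUBREG %tmp32, sub_16bit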
2629 unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPS2PHZ128rr
2630 : X86::VCVTPS2PHrr;
2631 InputReg = fastEmitInst_ri(Opc, RC, InputReg, false, 4);
2632
2633 // Move the lower 32-bits of ResultReg to another register of class GR32.
2634 Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr
2635 : X86::VMOVPDI2DIrr;
2636 ResultReg = createResultReg(&X86::GR32RegClass);
2637 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
2638 .addReg(InputReg, RegState::Kill);
2639
2640 // The result value is in the lower 16-bits of ResultReg.
2641 unsigned RegIdx = X86::sub_16bit;
2642 ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx);
2643 } else {
2644 assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");
2645 // Explicitly zero-extend the input to 32-bit.
2646 InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg,
2647 /*Op0IsKill=*/false);
2648
2649 // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr.
2650 InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,
2651 InputReg, /*Op0IsKill=*/true);
2652
2653 unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr
2654 : X86::VCVTPH2PSrr;
2655 InputReg = fastEmitInst_r(Opc, RC, InputReg, /*Op0IsKill=*/true);
2656
2657 // The result value is in the lower 32-bits of ResultReg.
2658 // Emit an explicit copy from register class VR128 to register class FR32.
2659 ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
2660 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2661 TII.get(TargetOpcode::COPY), ResultReg)
2662 .addReg(InputReg, RegState::Kill);
2663 }
2664
2665 updateValueMap(II, ResultReg);
2666 return true;
2667 }
2668 case Intrinsic::frameaddress: {
2669 MachineFunction *MF = FuncInfo.MF;
2670 if (MF->getTarget().getMCAsmInfo()->usesWindowsCFI())
2671 return false;
2672
2673 Type *RetTy = II->getCalledFunction()->getReturnType();
2674
2675 MVT VT;
2676 if (!isTypeLegal(RetTy, VT))
2677 return false;
2678
2679 unsigned Opc;
2680 const TargetRegisterClass *RC = nullptr;
2681
2682 switch (VT.SimpleTy) {
2683 default: llvm_unreachable("Invalid result type for frameaddress.");
2684 case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
2685 case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
2686 }
2687
2688 // This needs to be set before we call getPtrSizedFrameRegister, otherwise
2689 // we get the wrong frame register.
2690 MachineFrameInfo &MFI = MF->getFrameInfo();
2691 MFI.setFrameAddressIsTaken(true);
2692
2693 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2694 unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*MF);
2695 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
2696 (FrameReg == X86::EBP && VT == MVT::i32)) &&
2697 "Invalid Frame Register!");
2698
2699 // Always make a copy of the frame register to a vreg first, so that we
2700 // never directly reference the frame register (the TwoAddressInstruction-
2701 // Pass doesn't like that).
2702 Register SrcReg = createResultReg(RC);
2703 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2704 TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
2705
2706 // Now recursively load from the frame address.
2707 // movq (%rbp), %rax
2708 // movq (%rax), %rax
2709 // movq (%rax), %rax
2710 // ...
2711 unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
2712 while (Depth--) {
2713 Register DestReg = createResultReg(RC);
2714 addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2715 TII.get(Opc), DestReg), SrcReg);
2716 SrcReg = DestReg;
2717 }
2718
2719 updateValueMap(II, SrcReg);
2720 return true;
2721 }
2722 case Intrinsic::memcpy: {
2723 const MemCpyInst *MCI = cast<MemCpyInst>(II);
2724 // Don't handle volatile memcpys; variable-length ones become a libcall below.
2725 if (MCI->isVolatile())
2726 return false;
2727
2728 if (isa<ConstantInt>(MCI->getLength())) {
2729 // Small memcpy's are common enough that we want to do them
2730 // without a call if possible.
2731 uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
2732 if (IsMemcpySmall(Len)) {
2733 X86AddressMode DestAM, SrcAM;
2734 if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
2735 !X86SelectAddress(MCI->getRawSource(), SrcAM))
2736 return false;
2737 TryEmitSmallMemcpy(DestAM, SrcAM, Len);
2738 return true;
2739 }
2740 }
2741
2742 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2743 if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
2744 return false;
2745
2746 if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
2747 return false;
2748
2749 return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 1);
2750 }
2751 case Intrinsic::memset: {
2752 const MemSetInst *MSI = cast<MemSetInst>(II);
2753
2754 if (MSI->isVolatile())
2755 return false;
2756
2757 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2758 if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
2759 return false;
2760
2761 if (MSI->getDestAddressSpace() > 255)
2762 return false;
2763
2764 return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
2765 }
2766 case Intrinsic::stackprotector: {
2767 // Emit code to store the stack guard onto the stack.
2768 EVT PtrTy = TLI.getPointerTy(DL);
2769
2770 const Value *Op1 = II->getArgOperand(0); // The guard's value.
2771 const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));
2772
2773 MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);
2774
2775 // Grab the frame index.
2776 X86AddressMode AM;
2777 if (!X86SelectAddress(Slot, AM)) return false;
2778 if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
2779 return true;
2780 }
2781 case Intrinsic::dbg_declare: {
2782 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
2783 X86AddressMode AM;
2784 assert(DI->getAddress() && "Null address should be checked earlier!");
2785 if (!X86SelectAddress(DI->getAddress(), AM))
2786 return false;
2787 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
2788 // FIXME may need to add RegState::Debug to any registers produced,
2789 // although ESP/EBP should be the only ones at the moment.
2790 assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
2791 "Expected inlined-at fields to agree");
2792 addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM)
2793 .addImm(0)
2794 .addMetadata(DI->getVariable())
2795 .addMetadata(DI->getExpression());
2796 return true;
2797 }
2798 case Intrinsic::trap: {
2799 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP));
2800 return true;
2801 }
2802 case Intrinsic::sqrt: {
2803 if (!Subtarget->hasSSE1())
2804 return false;
2805
2806 Type *RetTy = II->getCalledFunction()->getReturnType();
2807
2808 MVT VT;
2809 if (!isTypeLegal(RetTy, VT))
2810 return false;
2811
2812 // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
2813 // is not generated by FastISel yet.
2814 // FIXME: Update this code once tablegen can handle it.
2815 static const uint16_t SqrtOpc[3][2] = {
2816 { X86::SQRTSSr, X86::SQRTSDr },
2817 { X86::VSQRTSSr, X86::VSQRTSDr },
2818 { X86::VSQRTSSZr, X86::VSQRTSDZr },
2819 };
2820 unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
2821 Subtarget->hasAVX() ? 1 :
2822 0;
2823 unsigned Opc;
2824 switch (VT.SimpleTy) {
2825 default: return false;
2826 case MVT::f32: Opc = SqrtOpc[AVXLevel][0]; break;
2827 case MVT::f64: Opc = SqrtOpc[AVXLevel][1]; break;
2828 }
2829
2830 const Value *SrcVal = II->getArgOperand(0);
2831 Register SrcReg = getRegForValue(SrcVal);
2832
2833 if (SrcReg == 0)
2834 return false;
2835
2836 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
2837 unsigned ImplicitDefReg = 0;
2838 if (AVXLevel > 0) {
2839 ImplicitDefReg = createResultReg(RC);
2840 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2841 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2842 }
2843
2844 Register ResultReg = createResultReg(RC);
2845 MachineInstrBuilder MIB;
2846 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
2847 ResultReg);
2848
2849 if (ImplicitDefReg)
2850 MIB.addReg(ImplicitDefReg);
2851
2852 MIB.addReg(SrcReg);
2853
2854 updateValueMap(II, ResultReg);
2855 return true;
2856 }
2857 case Intrinsic::sadd_with_overflow:
2858 case Intrinsic::uadd_with_overflow:
2859 case Intrinsic::ssub_with_overflow:
2860 case Intrinsic::usub_with_overflow:
2861 case Intrinsic::smul_with_overflow:
2862 case Intrinsic::umul_with_overflow: {
2863 // This implements the basic lowering of the xalu with overflow intrinsics
2864 // into add/sub/mul followed by either seto or setb.
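// E.g. "llvm.uadd.with.overflow.i32(%a, %b)" becomes roughly:
//   addl %b, %a -> %val
//   setb %ovf              ; CF set means unsigned overflow (COND_B)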
2865 const Function *Callee = II->getCalledFunction();
2866 auto *Ty = cast<StructType>(Callee->getReturnType());
2867 Type *RetTy = Ty->getTypeAtIndex(0U);
2868 assert(Ty->getTypeAtIndex(1)->isIntegerTy() &&
2869 Ty->getTypeAtIndex(1)->getScalarSizeInBits() == 1 &&
2870 "Overflow value expected to be an i1");
2871
2872 MVT VT;
2873 if (!isTypeLegal(RetTy, VT))
2874 return false;
2875
2876 if (VT < MVT::i8 || VT > MVT::i64)
2877 return false;
2878
2879 const Value *LHS = II->getArgOperand(0);
2880 const Value *RHS = II->getArgOperand(1);
2881
2882 // Canonicalize immediate to the RHS.
2883 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) && II->isCommutative())
2884 std::swap(LHS, RHS);
2885
2886 unsigned BaseOpc, CondCode;
2887 switch (II->getIntrinsicID()) {
2888 default: llvm_unreachable("Unexpected intrinsic!");
2889 case Intrinsic::sadd_with_overflow:
2890 BaseOpc = ISD::ADD; CondCode = X86::COND_O; break;
2891 case Intrinsic::uadd_with_overflow:
2892 BaseOpc = ISD::ADD; CondCode = X86::COND_B; break;
2893 case Intrinsic::ssub_with_overflow:
2894 BaseOpc = ISD::SUB; CondCode = X86::COND_O; break;
2895 case Intrinsic::usub_with_overflow:
2896 BaseOpc = ISD::SUB; CondCode = X86::COND_B; break;
2897 case Intrinsic::smul_with_overflow:
2898 BaseOpc = X86ISD::SMUL; CondCode = X86::COND_O; break;
2899 case Intrinsic::umul_with_overflow:
2900 BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break;
2901 }
2902
2903 Register LHSReg = getRegForValue(LHS);
2904 if (LHSReg == 0)
2905 return false;
2906 bool LHSIsKill = hasTrivialKill(LHS);
2907
2908 unsigned ResultReg = 0;
2909 // Check if we have an immediate version.
2910 if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
2911 static const uint16_t Opc[2][4] = {
2912 { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
2913 { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
2914 };
2915
2916 if (CI->isOne() && (BaseOpc == ISD::ADD || BaseOpc == ISD::SUB) &&
2917 CondCode == X86::COND_O) {
2918 // We can use INC/DEC.
2919 ResultReg = createResultReg(TLI.getRegClassFor(VT));
2920 bool IsDec = BaseOpc == ISD::SUB;
2921 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2922 TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
2923 .addReg(LHSReg, getKillRegState(LHSIsKill));
2924 } else
2925 ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
2926 CI->getZExtValue());
2927 }
2928
2929 unsigned RHSReg;
2930 bool RHSIsKill;
2931 if (!ResultReg) {
2932 RHSReg = getRegForValue(RHS);
2933 if (RHSReg == 0)
2934 return false;
2935 RHSIsKill = hasTrivialKill(RHS);
2936 ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
2937 RHSIsKill);
2938 }
2939
2940 // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
2941 // them manually.
2942 if (BaseOpc == X86ISD::UMUL && !ResultReg) {
2943 static const uint16_t MULOpc[] =
2944 { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
2945 static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
2946 // First copy the first operand into AL/AX/EAX/RAX (depending on the
2947 // type), which is an implicit input to the X86::MUL*r instruction.
2948 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2949 TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
2950 .addReg(LHSReg, getKillRegState(LHSIsKill));
2951 ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
2952 TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
2953 } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
2954 static const uint16_t MULOpc[] =
2955 { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
2956 if (VT == MVT::i8) {
2957 // Copy the first operand into AL, which is an implicit input to the
2958 // X86::IMUL8r instruction.
2959 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2960 TII.get(TargetOpcode::COPY), X86::AL)
2961 .addReg(LHSReg, getKillRegState(LHSIsKill));
2962 ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
2963 RHSIsKill);
2964 } else
2965 ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
2966 TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
2967 RHSReg, RHSIsKill);
2968 }
2969
2970 if (!ResultReg)
2971 return false;
2972
2973 // Assign to a GPR since the overflow return value is lowered to a SETcc.
2974 Register ResultReg2 = createResultReg(&X86::GR8RegClass);
2975 assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
2976 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
2977 ResultReg2).addImm(CondCode);
2978
2979 updateValueMap(II, ResultReg, 2);
2980 return true;
2981 }
2982 case Intrinsic::x86_sse_cvttss2si:
2983 case Intrinsic::x86_sse_cvttss2si64:
2984 case Intrinsic::x86_sse2_cvttsd2si:
2985 case Intrinsic::x86_sse2_cvttsd2si64: {
2986 bool IsInputDouble;
2987 switch (II->getIntrinsicID()) {
2988 default: llvm_unreachable("Unexpected intrinsic.");
2989 case Intrinsic::x86_sse_cvttss2si:
2990 case Intrinsic::x86_sse_cvttss2si64:
2991 if (!Subtarget->hasSSE1())
2992 return false;
2993 IsInputDouble = false;
2994 break;
2995 case Intrinsic::x86_sse2_cvttsd2si:
2996 case Intrinsic::x86_sse2_cvttsd2si64:
2997 if (!Subtarget->hasSSE2())
2998 return false;
2999 IsInputDouble = true;
3000 break;
3001 }
3002
3003 Type *RetTy = II->getCalledFunction()->getReturnType();
3004 MVT VT;
3005 if (!isTypeLegal(RetTy, VT))
3006 return false;
3007
3008 static const uint16_t CvtOpc[3][2][2] = {
3009 { { X86::CVTTSS2SIrr, X86::CVTTSS2SI64rr },
3010 { X86::CVTTSD2SIrr, X86::CVTTSD2SI64rr } },
3011 { { X86::VCVTTSS2SIrr, X86::VCVTTSS2SI64rr },
3012 { X86::VCVTTSD2SIrr, X86::VCVTTSD2SI64rr } },
3013 { { X86::VCVTTSS2SIZrr, X86::VCVTTSS2SI64Zrr },
3014 { X86::VCVTTSD2SIZrr, X86::VCVTTSD2SI64Zrr } },
3015 };
3016 unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
3017 Subtarget->hasAVX() ? 1 :
3018 0;
3019 unsigned Opc;
3020 switch (VT.SimpleTy) {
3021 default: llvm_unreachable("Unexpected result type.");
3022 case MVT::i32: Opc = CvtOpc[AVXLevel][IsInputDouble][0]; break;
3023 case MVT::i64: Opc = CvtOpc[AVXLevel][IsInputDouble][1]; break;
3024 }
3025
3026 // Check if we can fold insertelement instructions into the convert.
3027 const Value *Op = II->getArgOperand(0);
3028 while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
3029 const Value *Index = IE->getOperand(2);
3030 if (!isa<ConstantInt>(Index))
3031 break;
3032 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
3033
3034 if (Idx == 0) {
3035 Op = IE->getOperand(1);
3036 break;
3037 }
3038 Op = IE->getOperand(0);
3039 }
3040
3041 Register Reg = getRegForValue(Op);
3042 if (Reg == 0)
3043 return false;
3044
3045 Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3046 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
3047 .addReg(Reg);
3048
3049 updateValueMap(II, ResultReg);
3050 return true;
3051 }
3052 }
3053 }
3054
3055 bool X86FastISel::fastLowerArguments() {
3056 if (!FuncInfo.CanLowerReturn)
3057 return false;
3058
3059 const Function *F = FuncInfo.Fn;
3060 if (F->isVarArg())
3061 return false;
3062
3063 CallingConv::ID CC = F->getCallingConv();
3064 if (CC != CallingConv::C)
3065 return false;
3066
3067 if (Subtarget->isCallingConvWin64(CC))
3068 return false;
3069
3070 if (!Subtarget->is64Bit())
3071 return false;
3072
3073 if (Subtarget->useSoftFloat())
3074 return false;
3075
3076 // Only handle simple cases, i.e. up to 6 i32/i64 and 8 f32/f64 scalar arguments.
3077 unsigned GPRCnt = 0;
3078 unsigned FPRCnt = 0;
3079 for (auto const &Arg : F->args()) {
3080 if (Arg.hasAttribute(Attribute::ByVal) ||
3081 Arg.hasAttribute(Attribute::InReg) ||
3082 Arg.hasAttribute(Attribute::StructRet) ||
3083 Arg.hasAttribute(Attribute::SwiftSelf) ||
3084 Arg.hasAttribute(Attribute::SwiftError) ||
3085 Arg.hasAttribute(Attribute::Nest))
3086 return false;
3087
3088 Type *ArgTy = Arg.getType();
3089 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
3090 return false;
3091
3092 EVT ArgVT = TLI.getValueType(DL, ArgTy);
3093 if (!ArgVT.isSimple()) return false;
3094 switch (ArgVT.getSimpleVT().SimpleTy) {
3095 default: return false;
3096 case MVT::i32:
3097 case MVT::i64:
3098 ++GPRCnt;
3099 break;
3100 case MVT::f32:
3101 case MVT::f64:
3102 if (!Subtarget->hasSSE1())
3103 return false;
3104 ++FPRCnt;
3105 break;
3106 }
3107
3108 if (GPRCnt > 6)
3109 return false;
3110
3111 if (FPRCnt > 8)
3112 return false;
3113 }
3114
3115 static const MCPhysReg GPR32ArgRegs[] = {
3116 X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
3117 };
3118 static const MCPhysReg GPR64ArgRegs[] = {
3119 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
3120 };
3121 static const MCPhysReg XMMArgRegs[] = {
3122 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3123 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3124 };
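// These are the SysV AMD64 argument registers, so e.g. the first i64
// argument arrives in RDI and the first f32/f64 argument in XMM0.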

  unsigned GPRIdx = 0;
  unsigned FPRIdx = 0;
  for (auto const &Arg : F->args()) {
    MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned SrcReg;
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type.");
    case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
    case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
    case MVT::f32: LLVM_FALLTHROUGH;
    case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
    }
    Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
    // Without this, EmitLiveInCopies may eliminate the livein if its only
    // use is a bitcast (which isn't turned into an instruction).
    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(DstReg, getKillRegState(true));
    updateValueMap(&Arg, ResultReg);
  }
  return true;
}

static unsigned computeBytesPoppedByCalleeForSRet(const X86Subtarget *Subtarget,
                                                  CallingConv::ID CC,
                                                  const CallBase *CB) {
  if (Subtarget->is64Bit())
    return 0;
  if (Subtarget->getTargetTriple().isOSMSVCRT())
    return 0;
  if (CC == CallingConv::Fast || CC == CallingConv::GHC ||
      CC == CallingConv::HiPE || CC == CallingConv::Tail)
    return 0;

  if (CB)
    if (CB->arg_empty() || !CB->paramHasAttr(0, Attribute::StructRet) ||
        CB->paramHasAttr(0, Attribute::InReg) || Subtarget->isTargetMCU())
      return 0;

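  // Otherwise the callee pops the 4-byte hidden sret pointer, i.e. it returns
  // with 'ret $4'.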
  return 4;
}

bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
  auto &OutVals = CLI.OutVals;
  auto &OutFlags = CLI.OutFlags;
  auto &OutRegs = CLI.OutRegs;
  auto &Ins = CLI.Ins;
  auto &InRegs = CLI.InRegs;
  CallingConv::ID CC = CLI.CallConv;
  bool &IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;
  const Value *Callee = CLI.Callee;
  MCSymbol *Symbol = CLI.Symbol;

  bool Is64Bit = Subtarget->is64Bit();
  bool IsWin64 = Subtarget->isCallingConvWin64(CC);

  const CallInst *CI = dyn_cast_or_null<CallInst>(CLI.CB);
  const Function *CalledFn = CI ? CI->getCalledFunction() : nullptr;

  // Call / invoke instructions with NoCfCheck attribute require special
  // handling.
  const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CB);
  if ((CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck()))
    return false;

  // Functions with no_caller_saved_registers need special handling.
  if ((CI && CI->hasFnAttr("no_caller_saved_registers")) ||
      (CalledFn && CalledFn->hasFnAttribute("no_caller_saved_registers")))
    return false;

  // Functions using thunks for indirect calls need to use SDISel.
  if (Subtarget->useIndirectThunkCalls())
    return false;

  // Only handle the calling conventions listed below for now.
  switch (CC) {
  default: return false;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Tail:
  case CallingConv::WebKit_JS:
  case CallingConv::Swift:
  case CallingConv::X86_FastCall:
  case CallingConv::X86_StdCall:
  case CallingConv::X86_ThisCall:
  case CallingConv::Win64:
  case CallingConv::X86_64_SysV:
  case CallingConv::CFGuard_Check:
    break;
  }

  // Allow SelectionDAG isel to handle tail calls.
  if (IsTailCall)
    return false;

  // fastcc with -tailcallopt is intended to provide a guaranteed
  // tail call optimization. FastISel doesn't know how to do that.
  if ((CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) ||
      CC == CallingConv::Tail)
    return false;

  // Don't know how to handle Win64 varargs yet. Nothing special is needed for
  // x86-32, and the non-Win64 x86-64 handling is implemented below.
  if (IsVarArg && IsWin64)
    return false;

  // Don't know about inalloca yet.
  if (CLI.CB && CLI.CB->hasInAllocaArgument())
    return false;

  for (auto Flag : CLI.OutFlags)
    if (Flag.isSwiftError() || Flag.isPreallocated())
      return false;

  SmallVector<MVT, 16> OutVTs;
  SmallVector<unsigned, 16> ArgRegs;

  // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
  // instruction. This is safe because it is common to all FastISel supported
  // calling conventions on x86.
  for (int i = 0, e = OutVals.size(); i != e; ++i) {
    Value *&Val = OutVals[i];
    ISD::ArgFlagsTy Flags = OutFlags[i];
    if (auto *CI = dyn_cast<ConstantInt>(Val)) {
      if (CI->getBitWidth() < 32) {
        if (Flags.isSExt())
          Val = ConstantExpr::getSExt(CI, Type::getInt32Ty(CI->getContext()));
        else
          Val = ConstantExpr::getZExt(CI, Type::getInt32Ty(CI->getContext()));
      }
    }

    // Passing bools around ends up doing a trunc to i1 and passing it.
    // Codegen this as an argument + "and 1".
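    // E.g. (illustrative IR):
    //   %b = trunc i32 %x to i1
    //   call void @use(i1 zeroext %b)
    // passes %x masked with 'and $1' rather than materializing the i1.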
    MVT VT;
    auto *TI = dyn_cast<TruncInst>(Val);
    unsigned ResultReg;
    if (TI && TI->getType()->isIntegerTy(1) && CLI.CB &&
        (TI->getParent() == CLI.CB->getParent()) && TI->hasOneUse()) {
      Value *PrevVal = TI->getOperand(0);
      ResultReg = getRegForValue(PrevVal);

      if (!ResultReg)
        return false;

      if (!isTypeLegal(PrevVal->getType(), VT))
        return false;

      ResultReg =
          fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
    } else {
      if (!isTypeLegal(Val->getType(), VT) ||
          (VT.isVector() && VT.getVectorElementType() == MVT::i1))
        return false;
      ResultReg = getRegForValue(Val);
    }

    if (!ResultReg)
      return false;

    ArgRegs.push_back(ResultReg);
    OutVTs.push_back(VT);
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, Align(8));

  CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
    .addImm(NumBytes).addImm(0).addImm(0);

  // Walk the register/memloc assignments, inserting copies/loads.
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign const &VA = ArgLocs[i];
    const Value *ArgVal = OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    if (ArgVT == MVT::x86mmx)
      return false;

    unsigned ArgReg = ArgRegs[VA.getValNo()];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
             "Unexpected extend");

      if (ArgVT == MVT::i1)
        return false;

      bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
                                       ArgVT, ArgReg);
      assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
             "Unexpected extend");

      // Handle zero-extension from i1 to i8, which is common.
      if (ArgVT == MVT::i1) {
        // Set the high bits to zero.
        ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg, /*TODO: Kill=*/false);
        ArgVT = MVT::i8;

        if (ArgReg == 0)
          return false;
      }

      bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
                                       ArgVT, ArgReg);
      assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
             "Unexpected extend");
      bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg,
                                       ArgVT, ArgReg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
                                    ArgVT, ArgReg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
                                    ArgVT, ArgReg);

      assert(Emitted && "Failed to emit an anyext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::BCvt: {
      ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
                          /*TODO: Kill=*/false);
      assert(ArgReg && "Failed to emit a bitcast!");
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::VExt:
      // VExt has not been implemented, so this should be impossible to reach
      // for now. However, fall back to SelectionDAG isel once implemented.
      return false;
    case CCValAssign::AExtUpper:
    case CCValAssign::SExtUpper:
    case CCValAssign::ZExtUpper:
    case CCValAssign::FPExt:
    case CCValAssign::Trunc:
      llvm_unreachable("Unexpected loc info!");
    case CCValAssign::Indirect:
      // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
      // support this.
      return false;
    }

    if (VA.isRegLoc()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      OutRegs.push_back(VA.getLocReg());
    } else {
      assert(VA.isMemLoc() && "Unknown value location!");

      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      unsigned LocMemOffset = VA.getLocMemOffset();
      X86AddressMode AM;
      AM.Base.Reg = RegInfo->getStackRegister();
      AM.Disp = LocMemOffset;
      ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];
      Align Alignment = DL.getABITypeAlign(ArgVal->getType());
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getStack(*FuncInfo.MF, LocMemOffset),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
      if (Flags.isByVal()) {
        X86AddressMode SrcAM;
        SrcAM.Base.Reg = ArgReg;
        if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))
          return false;
      } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
        // If this is a really simple value, emit this with the Value* version
        // of X86FastEmitStore. If it isn't simple, we don't want to do this,
        // as it can cause us to reevaluate the argument.
        if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
          return false;
      } else {
        bool ValIsKill = hasTrivialKill(ArgVal);
        if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO))
          return false;
      }
    }
  }

  // ELF / PIC requires the GOT pointer to be in EBX before function calls
  // made via the PLT.
  if (Subtarget->isPICStyleGOT()) {
    unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
  }

  if (Is64Bit && IsVarArg && !IsWin64) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as a hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.
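    //
    // E.g. a call like 'printf("%f\n", x)' passing one double in XMM0 is
    // preceded by 'movb $1, %al' (illustrative).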

    // Count the number of XMM registers allocated.
    static const MCPhysReg XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
    assert((Subtarget->hasSSE1() || !NumXMMRegs)
           && "SSE registers cannot be used when SSE is disabled");
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
            X86::AL).addImm(NumXMMRegs);
  }

  // Materialize callee address in a register. FIXME: GV address can be
  // handled with a CALLpcrel32 instead.
  X86AddressMode CalleeAM;
  if (!X86SelectCallAddress(Callee, CalleeAM))
    return false;

  unsigned CalleeOp = 0;
  const GlobalValue *GV = nullptr;
  if (CalleeAM.GV != nullptr) {
    GV = CalleeAM.GV;
  } else if (CalleeAM.Base.Reg != 0) {
    CalleeOp = CalleeAM.Base.Reg;
  } else
    return false;

  // Issue the call.
  MachineInstrBuilder MIB;
  if (CalleeOp) {
    // Register-indirect call.
    unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))
      .addReg(CalleeOp);
  } else {
    // Direct call.
    assert(GV && "Not a direct call");

    // See if we need any target-specific flags on the GV operand.
    unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV);

    // This will be a direct call, or an indirect call through memory for
    // NonLazyBind calls or dllimport calls.
    bool NeedLoad = OpFlags == X86II::MO_DLLIMPORT ||
                    OpFlags == X86II::MO_GOTPCREL ||
                    OpFlags == X86II::MO_COFFSTUB;
    unsigned CallOpc = NeedLoad
                       ? (Is64Bit ? X86::CALL64m : X86::CALL32m)
                       : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
    if (NeedLoad)
      MIB.addReg(Is64Bit ? X86::RIP : 0).addImm(1).addReg(0);
    if (Symbol)
      MIB.addSym(Symbol, OpFlags);
    else
      MIB.addGlobalAddress(GV, 0, OpFlags);
    if (NeedLoad)
      MIB.addReg(0);
  }
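
  // E.g. the forms above correspond to 'callq *%rax',
  // 'callq *foo@GOTPCREL(%rip)', and 'callq foo' respectively (illustrative).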

  // Add a register mask operand representing the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Add an implicit use of the GOT pointer in EBX.
  if (Subtarget->isPICStyleGOT())
    MIB.addReg(X86::EBX, RegState::Implicit);

  if (Is64Bit && IsVarArg && !IsWin64)
    MIB.addReg(X86::AL, RegState::Implicit);

  // Add implicit physical register uses to the call.
  for (auto Reg : OutRegs)
    MIB.addReg(Reg, RegState::Implicit);

  // Issue CALLSEQ_END.
  unsigned NumBytesForCalleeToPop =
      X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
                       TM.Options.GuaranteedTailCallOpt)
          ? NumBytes // Callee pops everything.
          : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CB);
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
    .addImm(NumBytes).addImm(NumBytesForCalleeToPop);

  // Now handle call return values.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
                    CLI.RetTy->getContext());
  CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy all of the result registers out of their specified physreg.
  Register ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    EVT CopyVT = VA.getValVT();
    unsigned CopyReg = ResultReg + i;
    Register SrcReg = VA.getLocReg();

    // If this is x86-64, and we disabled SSE, we can't return FP values.
    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
        ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
      report_fatal_error("SSE register return with SSE disabled");
    }

    // If we prefer to use the value in xmm registers, copy it out as f80 and
    // use a truncate to move it from fp stack reg to xmm reg.
    if ((SrcReg == X86::FP0 || SrcReg == X86::FP1) &&
        isScalarFPTypeInSSEReg(VA.getValVT())) {
      CopyVT = MVT::f80;
      CopyReg = createResultReg(&X86::RFP80RegClass);
    }

    // Copy out the result.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), CopyReg).addReg(SrcReg);
    InRegs.push_back(VA.getLocReg());

    // Round the f80 to the right size, which also moves it to the appropriate
    // xmm register. This is accomplished by storing the f80 value in memory
    // and then loading it back.
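    // E.g. for an f32 result this is roughly 'fstps (slot); movss (slot),
    // %xmm0' through a fresh stack slot (illustrative).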
    if (CopyVT != VA.getValVT()) {
      EVT ResVT = VA.getValVT();
      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
      unsigned MemSize = ResVT.getSizeInBits()/8;
      int FI = MFI.CreateStackObject(MemSize, Align(MemSize), false);
      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(Opc)), FI)
        .addReg(CopyReg);
      Opc = ResVT == MVT::f32 ? X86::MOVSSrm_alt : X86::MOVSDrm_alt;
      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(Opc), ResultReg + i), FI);
    }
  }

  CLI.ResultReg = ResultReg;
  CLI.NumResultRegs = RVLocs.size();
  CLI.Call = MIB;

  return true;
}

bool
X86FastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::Ret:
    return X86SelectRet(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::SExt:
    return X86SelectSExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::SDiv:
  case Instruction::UDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return X86SelectDivRem(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  case Instruction::Trunc:
    return X86SelectTrunc(I);
  case Instruction::FPExt:
    return X86SelectFPExt(I);
  case Instruction::FPTrunc:
    return X86SelectFPTrunc(I);
  case Instruction::SIToFP:
    return X86SelectSIToFP(I);
  case Instruction::UIToFP:
    return X86SelectUIToFP(I);
  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return X86SelectZExt(I);
    if (DstVT.bitsLT(SrcVT))
      return X86SelectTrunc(I);
    // Same size: the conversion is a no-op, so reuse the source register.
    Register Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    updateValueMap(I, Reg);
    return true;
  }
  case Instruction::BitCast: {
    // Select SSE2/AVX bitcasts between 128/256/512 bit vector types.
    if (!Subtarget->hasSSE2())
      return false;

    MVT SrcVT, DstVT;
    if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT) ||
        !isTypeLegal(I->getType(), DstVT))
      return false;

    // Only allow vectors that use xmm/ymm/zmm.
    if (!SrcVT.isVector() || !DstVT.isVector() ||
        SrcVT.getVectorElementType() == MVT::i1 ||
        DstVT.getVectorElementType() == MVT::i1)
      return false;

    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;

    // Emit a reg-reg copy so we don't propagate cached known bits information
    // with the wrong VT if we fall out of fast isel after selecting this.
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    Register ResultReg = createResultReg(DstClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

    updateValueMap(I, ResultReg);
    return true;
  }
  }

  return false;
}

unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
  if (VT > MVT::i64)
    return 0;

  uint64_t Imm = CI->getZExtValue();
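  // Zero is materialized with MOV32r0 (i.e. 'xorl %reg, %reg'). A 32-bit def
  // implicitly zeroes the upper half of the 64-bit register, so the i64 case
  // only needs a SUBREG_TO_REG wrapper rather than a 64-bit move.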
  if (Imm == 0) {
    Register SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type");
    case MVT::i1:
    case MVT::i8:
      return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Op0IsKill=*/true,
                                        X86::sub_8bit);
    case MVT::i16:
      return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Op0IsKill=*/true,
                                        X86::sub_16bit);
    case MVT::i32:
      return SrcReg;
    case MVT::i64: {
      Register ResultReg = createResultReg(&X86::GR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
        .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
      return ResultReg;
    }
    }
  }

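  // For nonzero i64 immediates, prefer the shortest encoding: 'movl' if the
  // value zero-extends from 32 bits, 'movq' with a sign-extended imm32 if it
  // sign-extends, and a full 'movabsq' otherwise.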
  unsigned Opc = 0;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected value type");
  case MVT::i1:
    VT = MVT::i8;
    LLVM_FALLTHROUGH;
  case MVT::i8:  Opc = X86::MOV8ri;  break;
  case MVT::i16: Opc = X86::MOV16ri; break;
  case MVT::i32: Opc = X86::MOV32ri; break;
  case MVT::i64: {
    if (isUInt<32>(Imm))
      Opc = X86::MOV32ri64;
    else if (isInt<32>(Imm))
      Opc = X86::MOV64ri32;
    else
      Opc = X86::MOV64ri;
    break;
  }
  }
  return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}

unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
  if (CFP->isNullValue())
    return fastMaterializeFloatZero(CFP);

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  bool HasAVX = Subtarget->hasAVX();
  bool HasAVX512 = Subtarget->hasAVX512();
  switch (VT.SimpleTy) {
  default: return 0;
  case MVT::f32:
    if (X86ScalarSSEf32)
      Opc = HasAVX512 ? X86::VMOVSSZrm_alt :
            HasAVX    ? X86::VMOVSSrm_alt :
                        X86::MOVSSrm_alt;
    else
      Opc = X86::LD_Fp32m;
    break;
  case MVT::f64:
    if (X86ScalarSSEf64)
      Opc = HasAVX512 ? X86::VMOVSDZrm_alt :
            HasAVX    ? X86::VMOVSDrm_alt :
                        X86::MOVSDrm_alt;
    else
      Opc = X86::LD_Fp64m;
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

  // MachineConstantPool wants an explicit alignment.
  Align Alignment = DL.getPrefTypeAlign(CFP->getType());

  // x86-32 PIC requires a PIC base register for constant pools.
  unsigned PICBase = 0;
  unsigned char OpFlag = Subtarget->classifyLocalReference(nullptr);
  if (OpFlag == X86II::MO_PIC_BASE_OFFSET)
    PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
  else if (OpFlag == X86II::MO_GOTOFF)
    PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
  else if (Subtarget->is64Bit() && TM.getCodeModel() == CodeModel::Small)
    PICBase = X86::RIP;

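  // In the common small-code-model case the result is a single RIP-relative
  // load, e.g. 'movsd .LCPI0_0(%rip), %xmm0' (illustrative).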
  // Create the load from the constant pool.
  unsigned CPI = MCP.getConstantPoolIndex(CFP, Alignment);
  Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));

  // Large code model only applies to 64-bit mode.
  if (Subtarget->is64Bit() && CM == CodeModel::Large) {
    Register AddrReg = createResultReg(&X86::GR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
            AddrReg)
      .addConstantPoolIndex(CPI, 0, OpFlag);
    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                      TII.get(Opc), ResultReg);
    addRegReg(MIB, AddrReg, false, PICBase, false);
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getConstantPool(*FuncInfo.MF),
        MachineMemOperand::MOLoad, DL.getPointerSize(), Alignment);
    MIB->addMemOperand(*FuncInfo.MF, MMO);
    return ResultReg;
  }

  addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                   TII.get(Opc), ResultReg),
                           CPI, PICBase, OpFlag);
  return ResultReg;
}

unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return 0;

  // Materialize addresses with LEA/MOV instructions.
  X86AddressMode AM;
  if (X86SelectAddress(GV, AM)) {
    // If the expression is just a basereg, then we're done, otherwise we need
    // to emit an LEA.
    if (AM.BaseType == X86AddressMode::RegBase &&
        AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
      return AM.Base.Reg;

    Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
    if (TM.getRelocationModel() == Reloc::Static &&
        TLI.getPointerTy(DL) == MVT::i64) {
      // The displacement could be more than 32 bits away, so we need to use
      // an instruction with a 64-bit immediate.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
              ResultReg)
        .addGlobalAddress(GV);
    } else {
      unsigned Opc =
          TLI.getPointerTy(DL) == MVT::i32
              ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
              : X86::LEA64r;
      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                             TII.get(Opc), ResultReg), AM);
    }
    return ResultReg;
  }
  return 0;
}

unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple())
    return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const auto *CI = dyn_cast<ConstantInt>(C))
    return X86MaterializeInt(CI, VT);
  else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return X86MaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return X86MaterializeGV(GV, VT);

  return 0;
}

unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
  // Fail on dynamic allocas. At this point, getRegForValue has already
  // checked its CSE maps, so if we're here trying to handle a dynamic
  // alloca, we're not going to succeed. X86SelectAddress has a
  // check for dynamic allocas, because it's called directly from
  // various places, but targetMaterializeAlloca also needs a check
  // in order to avoid recursion between getRegForValue,
  // X86SelectAddress, and targetMaterializeAlloca.
  if (!FuncInfo.StaticAllocaMap.count(C))
    return 0;
  assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");
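
  // A static alloca lowers to a frame-relative LEA, e.g.
  // 'leaq -8(%rbp), %rax' on x86-64 (illustrative).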
  X86AddressMode AM;
  if (!X86SelectAddress(C, AM))
    return 0;
  unsigned Opc =
      TLI.getPointerTy(DL) == MVT::i32
          ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
          : X86::LEA64r;
  const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
  Register ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                         TII.get(Opc), ResultReg), AM);
  return ResultReg;
}

unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
  MVT VT;
  if (!isTypeLegal(CF->getType(), VT))
    return 0;

  // Get opcode and regclass for the given zero.
  bool HasAVX512 = Subtarget->hasAVX512();
  unsigned Opc = 0;
  switch (VT.SimpleTy) {
  default: return 0;
  case MVT::f32:
    if (X86ScalarSSEf32)
      Opc = HasAVX512 ? X86::AVX512_FsFLD0SS : X86::FsFLD0SS;
    else
      Opc = X86::LD_Fp032;
    break;
  case MVT::f64:
    if (X86ScalarSSEf64)
      Opc = HasAVX512 ? X86::AVX512_FsFLD0SD : X86::FsFLD0SD;
    else
      Opc = X86::LD_Fp064;
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

  Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
  return ResultReg;
}


bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  const Value *Ptr = LI->getPointerOperand();
  X86AddressMode AM;
  if (!X86SelectAddress(Ptr, AM))
    return false;
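
  // E.g. a load whose only use is a compare can fold into a memory-operand
  // form such as 'cmpl $0, (%rdi)' instead of a separate load (illustrative).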

  const X86InstrInfo &XII = (const X86InstrInfo &)TII;

  unsigned Size = DL.getTypeAllocSize(LI->getType());

  SmallVector<MachineOperand, 8> AddrOps;
  AM.getFullAddress(AddrOps);

  MachineInstr *Result = XII.foldMemoryOperandImpl(
      *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, LI->getAlign(),
      /*AllowCommute=*/true);
  if (!Result)
    return false;

  // The index register could be in the wrong register class. Unfortunately,
  // foldMemoryOperandImpl could have commuted the instruction, so it's not
  // enough to just look at OpNo + the offset to the index reg. We actually
  // need to scan the instruction to find the index reg and see if it's in the
  // correct reg class.
  unsigned OperandNo = 0;
  for (MachineInstr::mop_iterator I = Result->operands_begin(),
         E = Result->operands_end(); I != E; ++I, ++OperandNo) {
    MachineOperand &MO = *I;
    if (!MO.isReg() || MO.isDef() || MO.getReg() != AM.IndexReg)
      continue;
    // Found the index reg, now try to rewrite it.
    Register IndexReg = constrainOperandRegClass(Result->getDesc(),
                                                 MO.getReg(), OperandNo);
    if (IndexReg == MO.getReg())
      continue;
    MO.setReg(IndexReg);
  }

  Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
  Result->cloneInstrSymbols(*FuncInfo.MF, *MI);
  MachineBasicBlock::iterator I(MI);
  removeDeadCode(I, std::next(I));
  return true;
}

unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
                                        const TargetRegisterClass *RC,
                                        unsigned Op0, bool Op0IsKill,
                                        unsigned Op1, bool Op1IsKill,
                                        unsigned Op2, bool Op2IsKill,
                                        unsigned Op3, bool Op3IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
  Op3 = constrainOperandRegClass(II, Op3, II.getNumDefs() + 3);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill))
        .addReg(Op3, getKillRegState(Op3IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill))
        .addReg(Op3, getKillRegState(Op3IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}


namespace llvm {
  FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    return new X86FastISel(funcInfo, libInfo);
  }
}