//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrFoldTables.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "x86-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"

static cl::opt<bool>
    NoFusing("disable-spill-fusing",
             cl::desc("Disable fusing of spill code into instructions"),
             cl::Hidden);
static cl::opt<bool>
    PrintFailedFusing("print-failed-fuse-candidates",
                      cl::desc("Print instructions that the allocator wants to"
                               " fuse, but the X86 backend currently can't"),
                      cl::Hidden);
static cl::opt<bool>
    ReMatPICStubLoad("remat-pic-stub-load",
                     cl::desc("Re-materialize load from stub in PIC mode"),
                     cl::init(false), cl::Hidden);
static cl::opt<unsigned>
    PartialRegUpdateClearance("partial-reg-update-clearance",
                              cl::desc("Clearance between two register writes "
                                       "for inserting XOR to avoid partial "
                                       "register update"),
                              cl::init(64), cl::Hidden);
static cl::opt<unsigned>
    UndefRegClearance("undef-reg-clearance",
                      cl::desc("How many idle instructions we would like before "
                               "certain undef register reads"),
                      cl::init(128), cl::Hidden);
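
// Note: the options above are cl::Hidden, but they can still be set on the
// llc command line for experimentation; an illustrative (assumed) invocation:
//   llc -mtriple=x86_64-- -remat-pic-stub-load -partial-reg-update-clearance=32 foo.ll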

// Pin the vtable to this file.
void X86InstrInfo::anchor() {}

X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
    : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
                                               : X86::ADJCALLSTACKDOWN32),
                      (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
                                               : X86::ADJCALLSTACKUP32),
                      X86::CATCHRET, (STI.is64Bit() ? X86::RETQ : X86::RETL)),
      Subtarget(STI), RI(STI.getTargetTriple()) {
}

bool
X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                    Register &SrcReg, Register &DstReg,
                                    unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: break;
  case X86::MOVSX16rr8:
  case X86::MOVZX16rr8:
  case X86::MOVSX32rr8:
  case X86::MOVZX32rr8:
  case X86::MOVSX64rr8:
    if (!Subtarget.is64Bit())
      // It's not always legal to reference the low 8 bits of the larger
      // register in 32-bit mode.
      return false;
    LLVM_FALLTHROUGH;
  case X86::MOVSX32rr16:
  case X86::MOVZX32rr16:
  case X86::MOVSX64rr16:
  case X86::MOVSX64rr32: {
    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
      // Be conservative.
      return false;
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::MOVSX16rr8:
    case X86::MOVZX16rr8:
    case X86::MOVSX32rr8:
    case X86::MOVZX32rr8:
    case X86::MOVSX64rr8:
      SubIdx = X86::sub_8bit;
      break;
    case X86::MOVSX32rr16:
    case X86::MOVZX32rr16:
    case X86::MOVSX64rr16:
      SubIdx = X86::sub_16bit;
      break;
    case X86::MOVSX64rr32:
      SubIdx = X86::sub_32bit;
      break;
    }
    return true;
  }
  }
  return false;
}
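
// Illustrative example: for "%1:gr32 = MOVZX32rr8 %0:gr8" this returns true
// with SrcReg = %0, DstReg = %1 and SubIdx = X86::sub_8bit, i.e. %0 can be
// coalesced into the low 8 bits of %1.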

bool X86InstrInfo::isDataInvariant(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    // By default, assume that the instruction is not data invariant.
    return false;

  // Some target-independent operations that trivially lower to data-invariant
  // instructions.
  case TargetOpcode::COPY:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:
    return true;

  // On x86, imul is believed to be constant time with respect to its inputs.
  // However, these instructions set flags and are perhaps the most
  // surprisingly constant-time operations, so we call them out here
  // separately.
  case X86::IMUL16rr:
  case X86::IMUL16rri8:
  case X86::IMUL16rri:
  case X86::IMUL32rr:
  case X86::IMUL32rri8:
  case X86::IMUL32rri:
  case X86::IMUL64rr:
  case X86::IMUL64rri32:
  case X86::IMUL64rri8:

  // Bit scanning and counting instructions that are somewhat surprisingly
  // constant time as they scan across bits and do other fairly complex
  // operations like popcnt, but are believed to be constant time on x86.
  // However, these set flags.
  case X86::BSF16rr:
  case X86::BSF32rr:
  case X86::BSF64rr:
  case X86::BSR16rr:
  case X86::BSR32rr:
  case X86::BSR64rr:
  case X86::LZCNT16rr:
  case X86::LZCNT32rr:
  case X86::LZCNT64rr:
  case X86::POPCNT16rr:
  case X86::POPCNT32rr:
  case X86::POPCNT64rr:
  case X86::TZCNT16rr:
  case X86::TZCNT32rr:
  case X86::TZCNT64rr:

  // Bit manipulation instructions are effectively combinations of basic
  // arithmetic ops, and should still execute in constant time. These also
  // set flags.
  case X86::BLCFILL32rr:
  case X86::BLCFILL64rr:
  case X86::BLCI32rr:
  case X86::BLCI64rr:
  case X86::BLCIC32rr:
  case X86::BLCIC64rr:
  case X86::BLCMSK32rr:
  case X86::BLCMSK64rr:
  case X86::BLCS32rr:
  case X86::BLCS64rr:
  case X86::BLSFILL32rr:
  case X86::BLSFILL64rr:
  case X86::BLSI32rr:
  case X86::BLSI64rr:
  case X86::BLSIC32rr:
  case X86::BLSIC64rr:
  case X86::BLSMSK32rr:
  case X86::BLSMSK64rr:
  case X86::BLSR32rr:
  case X86::BLSR64rr:
  case X86::TZMSK32rr:
  case X86::TZMSK64rr:

  // Bit extracting and clearing instructions should execute in constant time,
  // and set flags.
  case X86::BEXTR32rr:
  case X86::BEXTR64rr:
  case X86::BEXTRI32ri:
  case X86::BEXTRI64ri:
  case X86::BZHI32rr:
  case X86::BZHI64rr:

  // Shift and rotate.
  case X86::ROL8r1:
  case X86::ROL16r1:
  case X86::ROL32r1:
  case X86::ROL64r1:
  case X86::ROL8rCL:
  case X86::ROL16rCL:
  case X86::ROL32rCL:
  case X86::ROL64rCL:
  case X86::ROL8ri:
  case X86::ROL16ri:
  case X86::ROL32ri:
  case X86::ROL64ri:
  case X86::ROR8r1:
  case X86::ROR16r1:
  case X86::ROR32r1:
  case X86::ROR64r1:
  case X86::ROR8rCL:
  case X86::ROR16rCL:
  case X86::ROR32rCL:
  case X86::ROR64rCL:
  case X86::ROR8ri:
  case X86::ROR16ri:
  case X86::ROR32ri:
  case X86::ROR64ri:
  case X86::SAR8r1:
  case X86::SAR16r1:
  case X86::SAR32r1:
  case X86::SAR64r1:
  case X86::SAR8rCL:
  case X86::SAR16rCL:
  case X86::SAR32rCL:
  case X86::SAR64rCL:
  case X86::SAR8ri:
  case X86::SAR16ri:
  case X86::SAR32ri:
  case X86::SAR64ri:
  case X86::SHL8r1:
  case X86::SHL16r1:
  case X86::SHL32r1:
  case X86::SHL64r1:
  case X86::SHL8rCL:
  case X86::SHL16rCL:
  case X86::SHL32rCL:
  case X86::SHL64rCL:
  case X86::SHL8ri:
  case X86::SHL16ri:
  case X86::SHL32ri:
  case X86::SHL64ri:
  case X86::SHR8r1:
  case X86::SHR16r1:
  case X86::SHR32r1:
  case X86::SHR64r1:
  case X86::SHR8rCL:
  case X86::SHR16rCL:
  case X86::SHR32rCL:
  case X86::SHR64rCL:
  case X86::SHR8ri:
  case X86::SHR16ri:
  case X86::SHR32ri:
  case X86::SHR64ri:
  case X86::SHLD16rrCL:
  case X86::SHLD32rrCL:
  case X86::SHLD64rrCL:
  case X86::SHLD16rri8:
  case X86::SHLD32rri8:
  case X86::SHLD64rri8:
  case X86::SHRD16rrCL:
  case X86::SHRD32rrCL:
  case X86::SHRD64rrCL:
  case X86::SHRD16rri8:
  case X86::SHRD32rri8:
  case X86::SHRD64rri8:

  // Basic arithmetic is constant time on the input but does set flags.
  case X86::ADC8rr:
  case X86::ADC8ri:
  case X86::ADC16rr:
  case X86::ADC16ri:
  case X86::ADC16ri8:
  case X86::ADC32rr:
  case X86::ADC32ri:
  case X86::ADC32ri8:
  case X86::ADC64rr:
  case X86::ADC64ri8:
  case X86::ADC64ri32:
  case X86::ADD8rr:
  case X86::ADD8ri:
  case X86::ADD16rr:
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD32rr:
  case X86::ADD32ri:
  case X86::ADD32ri8:
  case X86::ADD64rr:
  case X86::ADD64ri8:
  case X86::ADD64ri32:
  case X86::AND8rr:
  case X86::AND8ri:
  case X86::AND16rr:
  case X86::AND16ri:
  case X86::AND16ri8:
  case X86::AND32rr:
  case X86::AND32ri:
  case X86::AND32ri8:
  case X86::AND64rr:
  case X86::AND64ri8:
  case X86::AND64ri32:
  case X86::OR8rr:
  case X86::OR8ri:
  case X86::OR16rr:
  case X86::OR16ri:
  case X86::OR16ri8:
  case X86::OR32rr:
  case X86::OR32ri:
  case X86::OR32ri8:
  case X86::OR64rr:
  case X86::OR64ri8:
  case X86::OR64ri32:
  case X86::SBB8rr:
  case X86::SBB8ri:
  case X86::SBB16rr:
  case X86::SBB16ri:
  case X86::SBB16ri8:
  case X86::SBB32rr:
  case X86::SBB32ri:
  case X86::SBB32ri8:
  case X86::SBB64rr:
  case X86::SBB64ri8:
  case X86::SBB64ri32:
  case X86::SUB8rr:
  case X86::SUB8ri:
  case X86::SUB16rr:
  case X86::SUB16ri:
  case X86::SUB16ri8:
  case X86::SUB32rr:
  case X86::SUB32ri:
  case X86::SUB32ri8:
  case X86::SUB64rr:
  case X86::SUB64ri8:
  case X86::SUB64ri32:
  case X86::XOR8rr:
  case X86::XOR8ri:
  case X86::XOR16rr:
  case X86::XOR16ri:
  case X86::XOR16ri8:
  case X86::XOR32rr:
  case X86::XOR32ri:
  case X86::XOR32ri8:
  case X86::XOR64rr:
  case X86::XOR64ri8:
  case X86::XOR64ri32:
  // Arithmetic with just 32-bit and 64-bit variants and no immediates.
  case X86::ADCX32rr:
  case X86::ADCX64rr:
  case X86::ADOX32rr:
  case X86::ADOX64rr:
  case X86::ANDN32rr:
  case X86::ANDN64rr:
  // Unary arithmetic operations.
  case X86::DEC8r:
  case X86::DEC16r:
  case X86::DEC32r:
  case X86::DEC64r:
  case X86::INC8r:
  case X86::INC16r:
  case X86::INC32r:
  case X86::INC64r:
  case X86::NEG8r:
  case X86::NEG16r:
  case X86::NEG32r:
  case X86::NEG64r:

  // Unlike other arithmetic, NOT doesn't set EFLAGS.
  case X86::NOT8r:
  case X86::NOT16r:
  case X86::NOT32r:
  case X86::NOT64r:

  // Various move instructions used to zero or sign extend things. Note that we
  // intentionally don't support the _NOREX variants as we can't handle that
  // register constraint anyways.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr8:
  case X86::MOVSX64rr16:
  case X86::MOVSX64rr32:
  case X86::MOVZX16rr8:
  case X86::MOVZX32rr8:
  case X86::MOVZX32rr16:
  case X86::MOVZX64rr8:
  case X86::MOVZX64rr16:
  case X86::MOV32rr:

  // Arithmetic instructions that are both constant time and don't set flags.
  case X86::RORX32ri:
  case X86::RORX64ri:
  case X86::SARX32rr:
  case X86::SARX64rr:
  case X86::SHLX32rr:
  case X86::SHLX64rr:
  case X86::SHRX32rr:
  case X86::SHRX64rr:

  // LEA doesn't actually access memory, and its arithmetic is constant time.
  case X86::LEA16r:
  case X86::LEA32r:
  case X86::LEA64_32r:
  case X86::LEA64r:
    return true;
  }
}
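
// Illustrative counterexample: DIV/IDIV are deliberately absent from the list
// above, since their latency on many x86 implementations depends on the
// operand values, so they are not data invariant.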

bool X86InstrInfo::isDataInvariantLoad(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    // By default, assume that the load will immediately leak.
    return false;

  // On x86 it is believed that imul is constant time w.r.t. the loaded data.
  // However, these instructions set flags and are perhaps the most
  // surprisingly constant-time operations, so we call them out here
  // separately.
  case X86::IMUL16rm:
  case X86::IMUL16rmi8:
  case X86::IMUL16rmi:
  case X86::IMUL32rm:
  case X86::IMUL32rmi8:
  case X86::IMUL32rmi:
  case X86::IMUL64rm:
  case X86::IMUL64rmi32:
  case X86::IMUL64rmi8:

  // Bit scanning and counting instructions that are somewhat surprisingly
  // constant time as they scan across bits and do other fairly complex
  // operations like popcnt, but are believed to be constant time on x86.
  // However, these set flags.
  case X86::BSF16rm:
  case X86::BSF32rm:
  case X86::BSF64rm:
  case X86::BSR16rm:
  case X86::BSR32rm:
  case X86::BSR64rm:
  case X86::LZCNT16rm:
  case X86::LZCNT32rm:
  case X86::LZCNT64rm:
  case X86::POPCNT16rm:
  case X86::POPCNT32rm:
  case X86::POPCNT64rm:
  case X86::TZCNT16rm:
  case X86::TZCNT32rm:
  case X86::TZCNT64rm:

  // Bit manipulation instructions are effectively combinations of basic
  // arithmetic ops, and should still execute in constant time. These also
  // set flags.
  case X86::BLCFILL32rm:
  case X86::BLCFILL64rm:
  case X86::BLCI32rm:
  case X86::BLCI64rm:
  case X86::BLCIC32rm:
  case X86::BLCIC64rm:
  case X86::BLCMSK32rm:
  case X86::BLCMSK64rm:
  case X86::BLCS32rm:
  case X86::BLCS64rm:
  case X86::BLSFILL32rm:
  case X86::BLSFILL64rm:
  case X86::BLSI32rm:
  case X86::BLSI64rm:
  case X86::BLSIC32rm:
  case X86::BLSIC64rm:
  case X86::BLSMSK32rm:
  case X86::BLSMSK64rm:
  case X86::BLSR32rm:
  case X86::BLSR64rm:
  case X86::TZMSK32rm:
  case X86::TZMSK64rm:

  // Bit extracting and clearing instructions should execute in constant time,
  // and set flags.
  case X86::BEXTR32rm:
  case X86::BEXTR64rm:
  case X86::BEXTRI32mi:
  case X86::BEXTRI64mi:
  case X86::BZHI32rm:
  case X86::BZHI64rm:

  // Basic arithmetic is constant time on the input but does set flags.
  case X86::ADC8rm:
  case X86::ADC16rm:
  case X86::ADC32rm:
  case X86::ADC64rm:
  case X86::ADCX32rm:
  case X86::ADCX64rm:
  case X86::ADD8rm:
  case X86::ADD16rm:
  case X86::ADD32rm:
  case X86::ADD64rm:
  case X86::ADOX32rm:
  case X86::ADOX64rm:
  case X86::AND8rm:
  case X86::AND16rm:
  case X86::AND32rm:
  case X86::AND64rm:
  case X86::ANDN32rm:
  case X86::ANDN64rm:
  case X86::OR8rm:
  case X86::OR16rm:
  case X86::OR32rm:
  case X86::OR64rm:
  case X86::SBB8rm:
  case X86::SBB16rm:
  case X86::SBB32rm:
  case X86::SBB64rm:
  case X86::SUB8rm:
  case X86::SUB16rm:
  case X86::SUB32rm:
  case X86::SUB64rm:
  case X86::XOR8rm:
  case X86::XOR16rm:
  case X86::XOR32rm:
  case X86::XOR64rm:

  // Integer multiply w/o affecting flags is still believed to be constant
  // time on x86. Called out separately as this is among the most surprising
  // instructions to exhibit that behavior.
  case X86::MULX32rm:
  case X86::MULX64rm:

  // Arithmetic instructions that are both constant time and don't set flags.
  case X86::RORX32mi:
  case X86::RORX64mi:
  case X86::SARX32rm:
  case X86::SARX64rm:
  case X86::SHLX32rm:
  case X86::SHLX64rm:
  case X86::SHRX32rm:
  case X86::SHRX64rm:

  // Conversions are believed to be constant time and don't set flags.
  case X86::CVTTSD2SI64rm:
  case X86::VCVTTSD2SI64rm:
  case X86::VCVTTSD2SI64Zrm:
  case X86::CVTTSD2SIrm:
  case X86::VCVTTSD2SIrm:
  case X86::VCVTTSD2SIZrm:
  case X86::CVTTSS2SI64rm:
  case X86::VCVTTSS2SI64rm:
  case X86::VCVTTSS2SI64Zrm:
  case X86::CVTTSS2SIrm:
  case X86::VCVTTSS2SIrm:
  case X86::VCVTTSS2SIZrm:
  case X86::CVTSI2SDrm:
  case X86::VCVTSI2SDrm:
  case X86::VCVTSI2SDZrm:
  case X86::CVTSI2SSrm:
  case X86::VCVTSI2SSrm:
  case X86::VCVTSI2SSZrm:
  case X86::CVTSI642SDrm:
  case X86::VCVTSI642SDrm:
  case X86::VCVTSI642SDZrm:
  case X86::CVTSI642SSrm:
  case X86::VCVTSI642SSrm:
  case X86::VCVTSI642SSZrm:
  case X86::CVTSS2SDrm:
  case X86::VCVTSS2SDrm:
  case X86::VCVTSS2SDZrm:
  case X86::CVTSD2SSrm:
  case X86::VCVTSD2SSrm:
  case X86::VCVTSD2SSZrm:
  // AVX512 added unsigned integer conversions.
  case X86::VCVTTSD2USI64Zrm:
  case X86::VCVTTSD2USIZrm:
  case X86::VCVTTSS2USI64Zrm:
  case X86::VCVTTSS2USIZrm:
  case X86::VCVTUSI2SDZrm:
  case X86::VCVTUSI642SDZrm:
  case X86::VCVTUSI2SSZrm:
  case X86::VCVTUSI642SSZrm:

  // Loads to register don't set flags.
  case X86::MOV8rm:
  case X86::MOV8rm_NOREX:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::MOVSX16rm8:
  case X86::MOVSX32rm16:
  case X86::MOVSX32rm8:
  case X86::MOVSX32rm8_NOREX:
  case X86::MOVSX64rm16:
  case X86::MOVSX64rm32:
  case X86::MOVSX64rm8:
  case X86::MOVZX16rm8:
  case X86::MOVZX32rm16:
  case X86::MOVZX32rm8:
  case X86::MOVZX32rm8_NOREX:
  case X86::MOVZX64rm16:
  case X86::MOVZX64rm8:
    return true;
  }
}

int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();

  if (isFrameInstr(MI)) {
    int SPAdj = alignTo(getFrameSize(MI), TFI->getStackAlign());
    SPAdj -= getFrameAdjustment(MI);
    if (!isFrameSetup(MI))
      SPAdj = -SPAdj;
    return SPAdj;
  }

  // To know whether a call adjusts the stack, we need information
  // that is bound to the following ADJCALLSTACKUP pseudo.
  // Look for the next ADJCALLSTACKUP that follows the call.
  if (MI.isCall()) {
    const MachineBasicBlock *MBB = MI.getParent();
    auto I = ++MachineBasicBlock::const_iterator(MI);
    for (auto E = MBB->end(); I != E; ++I) {
      if (I->getOpcode() == getCallFrameDestroyOpcode() || I->isCall())
        break;
    }

    // If we could not find a frame destroy opcode, then it has already
    // been simplified, so we don't care.
    if (I->getOpcode() != getCallFrameDestroyOpcode())
      return 0;

    return -(I->getOperand(1).getImm());
  }

  // Currently, we handle only the PUSHes we can reasonably expect to see in
  // call sequences.
  switch (MI.getOpcode()) {
  default:
    return 0;
  case X86::PUSH32i8:
  case X86::PUSH32r:
  case X86::PUSH32rmm:
  case X86::PUSH32rmr:
  case X86::PUSHi32:
    return 4;
  case X86::PUSH64i8:
  case X86::PUSH64r:
  case X86::PUSH64rmm:
  case X86::PUSH64rmr:
  case X86::PUSH64i32:
    return 8;
  }
}
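
// Worked example: a PUSH64r inside a call sequence reports an adjustment of
// +8 bytes; for the call instruction itself, the result comes from the second
// immediate of the matching ADJCALLSTACKUP pseudo, negated.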

/// Return true and the FrameIndex if the specified
/// operand and the following operands form a reference to the stack frame.
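/// Concretely, this matches addresses of the form (base = frame index,
/// scale = 1, index = no register, displacement = 0).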
bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
                                  int &FrameIndex) const {
  if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
      MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
      MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
      MI.getOperand(Op + X86::AddrDisp).isImm() &&
      MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
      MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
      MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
    FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
    return true;
  }
  return false;
}

static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8rm:
  case X86::KMOVBkm:
    MemBytes = 1;
    return true;
  case X86::MOV16rm:
  case X86::KMOVWkm:
    MemBytes = 2;
    return true;
  case X86::MOV32rm:
  case X86::MOVSSrm:
  case X86::MOVSSrm_alt:
  case X86::VMOVSSrm:
  case X86::VMOVSSrm_alt:
  case X86::VMOVSSZrm:
  case X86::VMOVSSZrm_alt:
  case X86::KMOVDkm:
    MemBytes = 4;
    return true;
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSDrm:
  case X86::MOVSDrm_alt:
  case X86::VMOVSDrm:
  case X86::VMOVSDrm_alt:
  case X86::VMOVSDZrm:
  case X86::VMOVSDZrm_alt:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::KMOVQkm:
    MemBytes = 8;
    return true;
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVUPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVUPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQU64Z128rm:
    MemBytes = 16;
    return true;
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQU64Z256rm:
    MemBytes = 32;
    return true;
  case X86::VMOVAPSZrm:
  case X86::VMOVUPSZrm:
  case X86::VMOVAPDZrm:
  case X86::VMOVUPDZrm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU64Zrm:
    MemBytes = 64;
    return true;
  }
}

static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {
  switch (Opcode) {
  default:
    return false;
  case X86::MOV8mr:
  case X86::KMOVBmk:
    MemBytes = 1;
    return true;
  case X86::MOV16mr:
  case X86::KMOVWmk:
    MemBytes = 2;
    return true;
  case X86::MOV32mr:
  case X86::MOVSSmr:
  case X86::VMOVSSmr:
  case X86::VMOVSSZmr:
  case X86::KMOVDmk:
    MemBytes = 4;
    return true;
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSDmr:
  case X86::VMOVSDmr:
  case X86::VMOVSDZmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
  case X86::KMOVQmk:
    MemBytes = 8;
    return true;
  case X86::MOVAPSmr:
  case X86::MOVUPSmr:
  case X86::MOVAPDmr:
  case X86::MOVUPDmr:
  case X86::MOVDQAmr:
  case X86::MOVDQUmr:
  case X86::VMOVAPSmr:
  case X86::VMOVUPSmr:
  case X86::VMOVAPDmr:
  case X86::VMOVUPDmr:
  case X86::VMOVDQAmr:
  case X86::VMOVDQUmr:
  case X86::VMOVUPSZ128mr:
  case X86::VMOVAPSZ128mr:
  case X86::VMOVUPSZ128mr_NOVLX:
  case X86::VMOVAPSZ128mr_NOVLX:
  case X86::VMOVUPDZ128mr:
  case X86::VMOVAPDZ128mr:
  case X86::VMOVDQA32Z128mr:
  case X86::VMOVDQU32Z128mr:
  case X86::VMOVDQA64Z128mr:
  case X86::VMOVDQU64Z128mr:
  case X86::VMOVDQU8Z128mr:
  case X86::VMOVDQU16Z128mr:
    MemBytes = 16;
    return true;
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
  case X86::VMOVUPSZ256mr_NOVLX:
  case X86::VMOVAPSZ256mr_NOVLX:
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
  case X86::VMOVDQU8Z256mr:
  case X86::VMOVDQU16Z256mr:
  case X86::VMOVDQA32Z256mr:
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA64Z256mr:
  case X86::VMOVDQU64Z256mr:
    MemBytes = 32;
    return true;
  case X86::VMOVUPSZmr:
  case X86::VMOVAPSZmr:
  case X86::VMOVUPDZmr:
  case X86::VMOVAPDZmr:
  case X86::VMOVDQU8Zmr:
  case X86::VMOVDQU16Zmr:
  case X86::VMOVDQA32Zmr:
  case X86::VMOVDQU32Zmr:
  case X86::VMOVDQA64Zmr:
  case X86::VMOVDQU64Zmr:
    MemBytes = 64;
    return true;
  }
  return false;
}

unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex) const {
  unsigned Dummy;
  return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
}

unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex,
                                           unsigned &MemBytes) const {
  if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
    if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
      return MI.getOperand(0).getReg();
  return 0;
}

unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                 int &FrameIndex) const {
  unsigned Dummy;
  if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
    unsigned Reg;
    if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    SmallVector<const MachineMemOperand *, 1> Accesses;
    if (hasLoadFromStackSlot(MI, Accesses)) {
      FrameIndex =
          cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
              ->getFrameIndex();
      return 1;
    }
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  unsigned Dummy;
  return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
}

unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex,
                                          unsigned &MemBytes) const {
  if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
    if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
        isFrameOperand(MI, 0, FrameIndex))
      return MI.getOperand(X86::AddrNumOperands).getReg();
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                int &FrameIndex) const {
  unsigned Dummy;
  if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
    unsigned Reg;
    if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    SmallVector<const MachineMemOperand *, 1> Accesses;
    if (hasStoreToStackSlot(MI, Accesses)) {
      FrameIndex =
          cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
              ->getFrameIndex();
      return 1;
    }
  }
  return 0;
}

/// Return true if the register is a PIC base, i.e. it is defined by
/// X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
  // Don't waste compile time scanning use-def chains of physregs.
  if (!Register::isVirtualRegister(BaseReg))
    return false;
  bool isPICBase = false;
  for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
                                               E = MRI.def_instr_end();
       I != E; ++I) {
    MachineInstr *DefMI = &*I;
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                     AAResults *AA) const {
  switch (MI.getOpcode()) {
  default:
    // This function should only be called for opcodes with the ReMaterializable
    // flag set.
    llvm_unreachable("Unknown rematerializable operation!");
    break;

  case X86::LOAD_STACK_GUARD:
  case X86::AVX1_SETALLONES:
  case X86::AVX2_SETALLONES:
  case X86::AVX512_128_SET0:
  case X86::AVX512_256_SET0:
  case X86::AVX512_512_SET0:
  case X86::AVX512_512_SETALLONES:
  case X86::AVX512_FsFLD0SD:
  case X86::AVX512_FsFLD0SS:
  case X86::AVX512_FsFLD0F128:
  case X86::AVX_SET0:
  case X86::FsFLD0SD:
  case X86::FsFLD0SS:
  case X86::FsFLD0F128:
  case X86::KSET0D:
  case X86::KSET0Q:
  case X86::KSET0W:
  case X86::KSET1D:
  case X86::KSET1Q:
  case X86::KSET1W:
  case X86::MMX_SET0:
  case X86::MOV32ImmSExti8:
  case X86::MOV32r0:
  case X86::MOV32r1:
  case X86::MOV32r_1:
  case X86::MOV32ri64:
  case X86::MOV64ImmSExti8:
  case X86::V_SET0:
  case X86::V_SETALLONES:
  case X86::MOV16ri:
  case X86::MOV32ri:
  case X86::MOV64ri:
  case X86::MOV64ri32:
  case X86::MOV8ri:
    return true;

  case X86::MOV8rm:
  case X86::MOV8rm_NOREX:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::MOVSSrm:
  case X86::MOVSSrm_alt:
  case X86::MOVSDrm:
  case X86::MOVSDrm_alt:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVUPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::VMOVSSrm:
  case X86::VMOVSSrm_alt:
  case X86::VMOVSDrm:
  case X86::VMOVSDrm_alt:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVUPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  // AVX-512
  case X86::VMOVSSZrm:
  case X86::VMOVSSZrm_alt:
  case X86::VMOVSDZrm:
  case X86::VMOVSDZrm_alt:
  case X86::VMOVAPDZ128rm:
  case X86::VMOVAPDZ256rm:
  case X86::VMOVAPDZrm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVAPSZ128rm_NOVLX:
  case X86::VMOVAPSZ256rm_NOVLX:
  case X86::VMOVAPSZrm:
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU16Z128rm:
  case X86::VMOVDQU16Z256rm:
  case X86::VMOVDQU16Zrm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQU64Zrm:
  case X86::VMOVDQU8Z128rm:
  case X86::VMOVDQU8Z256rm:
  case X86::VMOVDQU8Zrm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVUPDZrm:
  case X86::VMOVUPSZ128rm:
  case X86::VMOVUPSZ256rm:
  case X86::VMOVUPSZ128rm_NOVLX:
  case X86::VMOVUPSZ256rm_NOVLX:
  case X86::VMOVUPSZrm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
        MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        MI.isDereferenceableInvariantLoad(AA)) {
      Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0 || BaseReg == X86::RIP)
        return true;
      // Allow re-materialization of PIC load.
      if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
        return false;
      const MachineFunction &MF = *MI.getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
        MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
        MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
        !MI.getOperand(1 + X86::AddrDisp).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
        return true;
      Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI.getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }
}

void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 Register DestReg, unsigned SubIdx,
                                 const MachineInstr &Orig,
                                 const TargetRegisterInfo &TRI) const {
  bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
  if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I)) {
    // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
    // effects.
    int Value;
    switch (Orig.getOpcode()) {
    case X86::MOV32r0:  Value = 0; break;
    case X86::MOV32r1:  Value = 1; break;
    case X86::MOV32r_1: Value = -1; break;
    default:
      llvm_unreachable("Unexpected instruction!");
    }

    const DebugLoc &DL = Orig.getDebugLoc();
    BuildMI(MBB, I, DL, get(X86::MOV32ri))
        .add(Orig.getOperand(0))
        .addImm(Value);
  } else {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
    MBB.insert(I, MI);
  }

  MachineInstr &NewMI = *std::prev(I);
  NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
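
// Illustrative before/after: rematerializing "%eax = MOV32r0" (normally
// lowered to a flag-clobbering XOR) at a point where EFLAGS is live instead
// emits "%eax = MOV32ri 0", which produces the same value without touching
// EFLAGS.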

/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS &&
        !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// Return the shift count of a machine operand, truncated to the number of
/// bits the hardware actually consults (six with REX.W, five without).
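/// For example, a 64-bit (REX.W) shift by 68 truncates to 68 & 63 == 4, and a
/// 32-bit shift by 36 truncates to 36 & 31 == 4, mirroring the hardware's
/// masking of the count.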
inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
                                              unsigned ShiftAmtOperandIdx) {
  // The shift count is six bits with the REX.W prefix and five bits without.
  unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
  unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
  return Imm & ShiftCountMask;
}

/// Check whether the given shift count can be represented by a LEA
/// instruction's scale factor.
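/// For example, "shl $3" maps to an LEA with scale 1 << 3 == 8, whereas a
/// shift by 4 would need scale 16, which the 2-bit SIB.scale field cannot
/// encode.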
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
  // Left shift instructions can be transformed into load-effective-address
  // instructions if we can encode them appropriately.
  // A LEA instruction utilizes a SIB byte to encode its scale factor.
  // The SIB.scale field is two bits wide, which means that we can encode any
  // shift amount less than 4.
  return ShAmt < 4 && ShAmt > 0;
}

bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                                  unsigned Opc, bool AllowSP, Register &NewSrc,
                                  bool &isKill, MachineOperand &ImplicitOp,
                                  LiveVariables *LV) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RC;
  if (AllowSP) {
    RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
  } else {
    RC = Opc != X86::LEA32r ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
  }
  Register SrcReg = Src.getReg();

  // For both LEA64 and LEA32 the register already has essentially the right
  // type (32-bit or 64-bit); we may just need to forbid SP.
  if (Opc != X86::LEA64_32r) {
    NewSrc = SrcReg;
    isKill = Src.isKill();
    assert(!Src.isUndef() && "Undef op doesn't need optimization");

    if (Register::isVirtualRegister(NewSrc) &&
        !MF.getRegInfo().constrainRegClass(NewSrc, RC))
      return false;

    return true;
  }

  // This is for an LEA64_32r and incoming registers are 32-bit. One way or
  // another we need to add 64-bit registers to the final MI.
  if (Register::isPhysicalRegister(SrcReg)) {
    ImplicitOp = Src;
    ImplicitOp.setImplicit();

    NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
    isKill = Src.isKill();
    assert(!Src.isUndef() && "Undef op doesn't need optimization");
  } else {
    // This is a virtual register of the wrong class; we have to create a
    // temporary 64-bit vreg to feed into the LEA.
    NewSrc = MF.getRegInfo().createVirtualRegister(RC);
    MachineInstr *Copy =
        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
            .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
            .add(Src);

    // Which is obviously going to be dead after we're done with it.
    isKill = true;

    if (LV)
      LV->replaceKillInstruction(SrcReg, MI, *Copy);
  }

  // We've set all the parameters without issue.
  return true;
}

MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
    unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
    LiveVariables *LV, bool Is8BitOp) const {
  // We handle 8-bit adds and various 16-bit opcodes in the switch below.
  MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
  assert((Is8BitOp ||
          RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
              *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
         "Unexpected type for LEA transform");

  // TODO: For a 32-bit target, we need to adjust the LEA variables with
  // something like this:
  //   Opcode = X86::LEA32r;
  //   InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
  //   OutRegLEA =
  //       Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass)
  //                : RegInfo.createVirtualRegister(&X86::GR32RegClass);
  if (!Subtarget.is64Bit())
    return nullptr;

  unsigned Opcode = X86::LEA64_32r;
  Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
  Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);

  // Build and insert into an implicit UNDEF value. This is OK because
  // we will be shifting and then extracting the lower 8/16 bits.
  // This has the potential to cause a partial register stall, e.g.
  //   movw (%rbp,%rcx,2), %dx
  //   leal -65(%rdx), %esi
  // But testing has shown this *does* help performance in 64-bit mode (at
  // least on modern x86 machines).
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  Register Dest = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  bool IsDead = MI.getOperand(0).isDead();
  bool IsKill = MI.getOperand(1).isKill();
  unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
  assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
  BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
  MachineInstr *InsMI =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
          .addReg(InRegLEA, RegState::Define, SubReg)
          .addReg(Src, getKillRegState(IsKill));

  MachineInstrBuilder MIB =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
  switch (MIOpc) {
  default: llvm_unreachable("Unreachable!");
  case X86::SHL8ri:
  case X86::SHL16ri: {
    unsigned ShAmt = MI.getOperand(2).getImm();
    MIB.addReg(0).addImm(1ULL << ShAmt)
       .addReg(InRegLEA, RegState::Kill).addImm(0).addReg(0);
    break;
  }
  case X86::INC8r:
  case X86::INC16r:
    addRegOffset(MIB, InRegLEA, true, 1);
    break;
  case X86::DEC8r:
  case X86::DEC16r:
    addRegOffset(MIB, InRegLEA, true, -1);
    break;
  case X86::ADD8ri:
  case X86::ADD8ri_DB:
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri_DB:
  case X86::ADD16ri8_DB:
    addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
    break;
  case X86::ADD8rr:
  case X86::ADD8rr_DB:
  case X86::ADD16rr:
  case X86::ADD16rr_DB: {
    Register Src2 = MI.getOperand(2).getReg();
    bool IsKill2 = MI.getOperand(2).isKill();
    assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
    unsigned InRegLEA2 = 0;
    MachineInstr *InsMI2 = nullptr;
    if (Src == Src2) {
      // ADD8rr/ADD16rr killed %reg1028, %reg1028
      // just a single insert_subreg.
      addRegReg(MIB, InRegLEA, true, InRegLEA, false);
    } else {
      if (Subtarget.is64Bit())
        InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
      else
        InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
      // Build and insert into an implicit UNDEF value. This is OK because
      // we will be shifting and then extracting the lower 8/16 bits.
      BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA2);
      InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
                   .addReg(InRegLEA2, RegState::Define, SubReg)
                   .addReg(Src2, getKillRegState(IsKill2));
      addRegReg(MIB, InRegLEA, true, InRegLEA2, true);
    }
    if (LV && IsKill2 && InsMI2)
      LV->replaceKillInstruction(Src2, MI, *InsMI2);
    break;
  }
  }

  MachineInstr *NewMI = MIB;
  MachineInstr *ExtMI =
      BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
          .addReg(Dest, RegState::Define | getDeadRegState(IsDead))
          .addReg(OutRegLEA, RegState::Kill, SubReg);

  if (LV) {
    // Update live variables.
    LV->getVarInfo(InRegLEA).Kills.push_back(NewMI);
    LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI);
    if (IsKill)
      LV->replaceKillInstruction(Src, MI, *InsMI);
    if (IsDead)
      LV->replaceKillInstruction(Dest, MI, *ExtMI);
  }

  return ExtMI;
}
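
// Illustrative expansion (a sketch, not verbatim MIR): converting
// "%2:gr16 = ADD16ri %1:gr16, 1" this way yields roughly
//   %in:gr64_nosp = IMPLICIT_DEF
//   %in.sub_16bit = COPY %1
//   %out:gr32 = LEA64_32r %in, 1, $noreg, 1, $noreg
//   %2:gr16 = COPY %out.sub_16bit
// i.e. the 16-bit add is performed by a flag-preserving 32-bit LEA.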

/// This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineInstr &MI, LiveVariables *LV) const {
  // The following opcodes also set the condition code register(s). Only
  // convert them to an equivalent LEA if the condition code register defs are
  // dead!
  if (hasLiveCondCodeDef(MI))
    return nullptr;

  MachineFunction &MF = *MI.getParent()->getParent();
  // All of the instructions we handle here are two-address instructions. Get
  // the known operands.
  const MachineOperand &Dest = MI.getOperand(0);
  const MachineOperand &Src = MI.getOperand(1);

  // Ideally, operations with undef should be folded before we get here, but we
  // can't guarantee it. Bail out because optimizing undefs is a waste of time.
  // Without this, we have to forward undef state to new register operands to
  // avoid machine verifier errors.
  if (Src.isUndef())
    return nullptr;
  if (MI.getNumOperands() > 2)
    if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
      return nullptr;

  MachineInstr *NewMI = nullptr;
  bool Is64Bit = Subtarget.is64Bit();

  bool Is8BitOp = false;
  unsigned MIOpc = MI.getOpcode();
  switch (MIOpc) {
  default: llvm_unreachable("Unreachable!");
  case X86::SHL64ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt))
      return nullptr;

    // LEA can't handle RSP.
    if (Register::isVirtualRegister(Src.getReg()) &&
        !MF.getRegInfo().constrainRegClass(Src.getReg(),
                                           &X86::GR64_NOSPRegClass))
      return nullptr;

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
                .add(Dest)
                .addReg(0)
                .addImm(1ULL << ShAmt)
                .add(Src)
                .addImm(0)
                .addReg(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt))
      return nullptr;

    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    // LEA can't handle ESP.
    bool isKill;
    Register SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, isKill,
                        ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(Opc))
            .add(Dest)
            .addReg(0)
            .addImm(1ULL << ShAmt)
            .addReg(SrcReg, getKillRegState(isKill))
            .addImm(0)
            .addReg(0);
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);
    NewMI = MIB;

    break;
  }
  case X86::SHL8ri:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::SHL16ri: {
    assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (!isTruncatedShiftCountForLEA(ShAmt))
      return nullptr;
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  }
  case X86::INC64r:
  case X86::INC32r: {
    assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
    unsigned Opc = MIOpc == X86::INC64r
                       ? X86::LEA64r
                       : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
    bool isKill;
    Register SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, isKill,
                        ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(Opc))
            .add(Dest)
            .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, 1);
    break;
  }
  case X86::DEC64r:
  case X86::DEC32r: {
    assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
    unsigned Opc = MIOpc == X86::DEC64r
                       ? X86::LEA64r
                       : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);

    bool isKill;
    Register SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, isKill,
                        ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                  .add(Dest)
                                  .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, -1);

    break;
  }
  case X86::DEC8r:
  case X86::INC8r:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::DEC16r:
  case X86::INC16r:
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  case X86::ADD64rr:
  case X86::ADD64rr_DB:
  case X86::ADD32rr:
  case X86::ADD32rr_DB: {
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc;
    if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
      Opc = X86::LEA64r;
    else
      Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    bool isKill;
    Register SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, isKill,
                        ImplicitOp, LV))
      return nullptr;

    const MachineOperand &Src2 = MI.getOperand(2);
    bool isKill2;
    Register SrcReg2;
    MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/false, SrcReg2, isKill2,
                        ImplicitOp2, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);
    if (ImplicitOp2.getReg() != 0)
      MIB.add(ImplicitOp2);

    NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
    if (LV && Src2.isKill())
      LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
    break;
  }
  case X86::ADD8rr:
  case X86::ADD8rr_DB:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::ADD16rr:
  case X86::ADD16rr_DB:
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  case X86::ADD64ri32:
  case X86::ADD64ri8:
  case X86::ADD64ri32_DB:
  case X86::ADD64ri8_DB:
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    NewMI = addOffset(
        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
        MI.getOperand(2));
    break;
  case X86::ADD32ri:
  case X86::ADD32ri8:
  case X86::ADD32ri_DB:
  case X86::ADD32ri8_DB: {
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    bool isKill;
    Register SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, isKill,
                        ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                  .add(Dest)
                                  .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, MI.getOperand(2));
    break;
  }
  case X86::ADD8ri:
  case X86::ADD8ri_DB:
    Is8BitOp = true;
    LLVM_FALLTHROUGH;
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri_DB:
  case X86::ADD16ri8_DB:
    return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
  case X86::SUB8ri:
  case X86::SUB16ri8:
  case X86::SUB16ri:
    // FIXME: Support these similarly to ADD8ri/ADD16ri*.
    return nullptr;
  case X86::SUB32ri8:
  case X86::SUB32ri: {
    if (!MI.getOperand(2).isImm())
      return nullptr;
    int64_t Imm = MI.getOperand(2).getImm();
    if (!isInt<32>(-Imm))
      return nullptr;

    assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
    unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;

    bool isKill;
    Register SrcReg;
    MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
    if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, isKill,
                        ImplicitOp, LV))
      return nullptr;

    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                                  .add(Dest)
                                  .addReg(SrcReg, getKillRegState(isKill));
    if (ImplicitOp.getReg() != 0)
      MIB.add(ImplicitOp);

    NewMI = addOffset(MIB, -Imm);
    break;
  }

  case X86::SUB64ri8:
  case X86::SUB64ri32: {
    if (!MI.getOperand(2).isImm())
      return nullptr;
    int64_t Imm = MI.getOperand(2).getImm();
    if (!isInt<32>(-Imm))
      return nullptr;

    assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");

    MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src);
    NewMI = addOffset(MIB, -Imm);
    break;
  }

  case X86::VMOVDQU8Z128rmk:
  case X86::VMOVDQU8Z256rmk:
  case X86::VMOVDQU8Zrmk:
  case X86::VMOVDQU16Z128rmk:
  case X86::VMOVDQU16Z256rmk:
  case X86::VMOVDQU16Zrmk:
  case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk:
  case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk:
  case X86::VMOVDQU32Zrmk:    case X86::VMOVDQA32Zrmk:
  case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk:
  case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk:
  case X86::VMOVDQU64Zrmk:    case X86::VMOVDQA64Zrmk:
  case X86::VMOVUPDZ128rmk:   case X86::VMOVAPDZ128rmk:
  case X86::VMOVUPDZ256rmk:   case X86::VMOVAPDZ256rmk:
  case X86::VMOVUPDZrmk:      case X86::VMOVAPDZrmk:
  case X86::VMOVUPSZ128rmk:   case X86::VMOVAPSZ128rmk:
  case X86::VMOVUPSZ256rmk:   case X86::VMOVAPSZ256rmk:
  case X86::VMOVUPSZrmk:      case X86::VMOVAPSZrmk:
  case X86::VBROADCASTSDZ256rmk:
  case X86::VBROADCASTSDZrmk:
  case X86::VBROADCASTSSZ128rmk:
  case X86::VBROADCASTSSZ256rmk:
  case X86::VBROADCASTSSZrmk:
  case X86::VPBROADCASTDZ128rmk:
  case X86::VPBROADCASTDZ256rmk:
  case X86::VPBROADCASTDZrmk:
  case X86::VPBROADCASTQZ128rmk:
  case X86::VPBROADCASTQZ256rmk:
  case X86::VPBROADCASTQZrmk: {
    unsigned Opc;
    switch (MIOpc) {
    default: llvm_unreachable("Unreachable!");
    case X86::VMOVDQU8Z128rmk: Opc = X86::VPBLENDMBZ128rmk; break;
    case X86::VMOVDQU8Z256rmk: Opc = X86::VPBLENDMBZ256rmk; break;
    case X86::VMOVDQU8Zrmk: Opc = X86::VPBLENDMBZrmk; break;
    case X86::VMOVDQU16Z128rmk: Opc = X86::VPBLENDMWZ128rmk; break;
    case X86::VMOVDQU16Z256rmk: Opc = X86::VPBLENDMWZ256rmk; break;
    case X86::VMOVDQU16Zrmk: Opc = X86::VPBLENDMWZrmk; break;
    case X86::VMOVDQU32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
    case X86::VMOVDQU32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
    case X86::VMOVDQU32Zrmk: Opc = X86::VPBLENDMDZrmk; break;
    case X86::VMOVDQU64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
    case X86::VMOVDQU64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
    case X86::VMOVDQU64Zrmk: Opc = X86::VPBLENDMQZrmk; break;
    case X86::VMOVUPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break;
    case X86::VMOVUPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break;
    case X86::VMOVUPDZrmk: Opc = X86::VBLENDMPDZrmk; break;
    case X86::VMOVUPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break;
    case X86::VMOVUPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break;
    case X86::VMOVUPSZrmk: Opc = X86::VBLENDMPSZrmk; break;
    case X86::VMOVDQA32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
    case X86::VMOVDQA32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
    case X86::VMOVDQA32Zrmk: Opc = X86::VPBLENDMDZrmk; break;
    case X86::VMOVDQA64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
    case X86::VMOVDQA64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
    case X86::VMOVDQA64Zrmk: Opc = X86::VPBLENDMQZrmk; break;
    case X86::VMOVAPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break;
    case X86::VMOVAPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break;
    case X86::VMOVAPDZrmk: Opc = X86::VBLENDMPDZrmk; break;
    case X86::VMOVAPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break;
    case X86::VMOVAPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break;
    case X86::VMOVAPSZrmk: Opc = X86::VBLENDMPSZrmk; break;
    case X86::VBROADCASTSDZ256rmk: Opc = X86::VBLENDMPDZ256rmbk; break;
    case X86::VBROADCASTSDZrmk: Opc = X86::VBLENDMPDZrmbk; break;
    case X86::VBROADCASTSSZ128rmk: Opc = X86::VBLENDMPSZ128rmbk; break;
    case X86::VBROADCASTSSZ256rmk: Opc = X86::VBLENDMPSZ256rmbk; break;
    case X86::VBROADCASTSSZrmk: Opc = X86::VBLENDMPSZrmbk; break;
    case X86::VPBROADCASTDZ128rmk: Opc = X86::VPBLENDMDZ128rmbk; break;
    case X86::VPBROADCASTDZ256rmk: Opc = X86::VPBLENDMDZ256rmbk; break;
    case X86::VPBROADCASTDZrmk: Opc = X86::VPBLENDMDZrmbk; break;
    case X86::VPBROADCASTQZ128rmk: Opc = X86::VPBLENDMQZ128rmbk; break;
    case X86::VPBROADCASTQZ256rmk: Opc = X86::VPBLENDMQZ256rmbk; break;
    case X86::VPBROADCASTQZrmk: Opc = X86::VPBLENDMQZrmbk; break;
    }

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
                .add(Dest)
                .add(MI.getOperand(2))
                .add(Src)
                .add(MI.getOperand(3))
                .add(MI.getOperand(4))
                .add(MI.getOperand(5))
                .add(MI.getOperand(6))
                .add(MI.getOperand(7));
    break;
  }

  case X86::VMOVDQU8Z128rrk:
  case X86::VMOVDQU8Z256rrk:
  case X86::VMOVDQU8Zrrk:
  case X86::VMOVDQU16Z128rrk:
  case X86::VMOVDQU16Z256rrk:
  case X86::VMOVDQU16Zrrk:
  case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk:
  case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk:
  case X86::VMOVDQU32Zrrk:    case X86::VMOVDQA32Zrrk:
  case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk:
  case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk:
  case X86::VMOVDQU64Zrrk:    case X86::VMOVDQA64Zrrk:
  case X86::VMOVUPDZ128rrk:   case X86::VMOVAPDZ128rrk:
  case X86::VMOVUPDZ256rrk:   case X86::VMOVAPDZ256rrk:
  case X86::VMOVUPDZrrk:      case X86::VMOVAPDZrrk:
  case X86::VMOVUPSZ128rrk:   case X86::VMOVAPSZ128rrk:
  case X86::VMOVUPSZ256rrk:   case X86::VMOVAPSZ256rrk:
  case X86::VMOVUPSZrrk:      case X86::VMOVAPSZrrk: {
    unsigned Opc;
    switch (MIOpc) {
    default: llvm_unreachable("Unreachable!");
    case X86::VMOVDQU8Z128rrk: Opc = X86::VPBLENDMBZ128rrk; break;
    case X86::VMOVDQU8Z256rrk: Opc = X86::VPBLENDMBZ256rrk; break;
    case X86::VMOVDQU8Zrrk: Opc = X86::VPBLENDMBZrrk; break;
    case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break;
    case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break;
    case X86::VMOVDQU16Zrrk: Opc = X86::VPBLENDMWZrrk; break;
    case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
    case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
    case X86::VMOVDQU32Zrrk: Opc = X86::VPBLENDMDZrrk; break;
    case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
    case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
    case X86::VMOVDQU64Zrrk: Opc = X86::VPBLENDMQZrrk; break;
    case X86::VMOVUPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break;
    case X86::VMOVUPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break;
    case X86::VMOVUPDZrrk: Opc = X86::VBLENDMPDZrrk; break;
    case X86::VMOVUPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break;
    case X86::VMOVUPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break;
    case X86::VMOVUPSZrrk: Opc = X86::VBLENDMPSZrrk; break;
    case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
    case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
    case X86::VMOVDQA32Zrrk: Opc = X86::VPBLENDMDZrrk; break;
    case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
    case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
    case X86::VMOVDQA64Zrrk: Opc = X86::VPBLENDMQZrrk; break;
    case X86::VMOVAPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break;
    case X86::VMOVAPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break;
    case X86::VMOVAPDZrrk: Opc = X86::VBLENDMPDZrrk; break;
    case X86::VMOVAPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break;
    case X86::VMOVAPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break;
    case X86::VMOVAPSZrrk: Opc = X86::VBLENDMPSZrrk; break;
    }

    NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1792 .add(Dest)
1793 .add(MI.getOperand(2))
1794 .add(Src)
1795 .add(MI.getOperand(3));
1796 break;
1797 }
1798 }
1799
1800 if (!NewMI) return nullptr;
1801
1802 if (LV) { // Update live variables
1803 if (Src.isKill())
1804 LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
1805 if (Dest.isDead())
1806 LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
1807 }
1808
1809 MFI->insert(MI.getIterator(), NewMI); // Insert the new inst
1810 return NewMI;
1811 }
1812
/// This determines which of the three possible cases of a three source
/// commute the source indexes correspond to, taking into account any mask
/// operands. All cases prevent commuting a pass-through operand; an unknown
/// index pairing is unreachable.
/// Case 0 - Possible to commute the first and second operands.
/// Case 1 - Possible to commute the first and third operands.
/// Case 2 - Possible to commute the second and third operands.
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
                                       unsigned SrcOpIdx2) {
  // Put the lowest index to SrcOpIdx1 to simplify the checks below.
  if (SrcOpIdx1 > SrcOpIdx2)
    std::swap(SrcOpIdx1, SrcOpIdx2);

  unsigned Op1 = 1, Op2 = 2, Op3 = 3;
  if (X86II::isKMasked(TSFlags)) {
    Op2++;
    Op3++;
  }
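
  // e.g. in a merge-masked VFMADD213PSZrk v1, k, v2, v3 the vector sources
  // sit at operand indices 1, 3 and 4 (the k-mask is operand 2), so
  // commuting v2 and v3 (indices 3 and 4) corresponds to Case 2 below.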
  if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
    return 0;
  if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
    return 1;
  if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
    return 2;
  llvm_unreachable("Unknown three src commute case.");
}

unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
    const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
    const X86InstrFMA3Group &FMA3Group) const {

  unsigned Opc = MI.getOpcode();

  // TODO: Commuting the 1st operand of FMA*_Int requires some additional
  // analysis. The commute optimization is legal only if all users of FMA*_Int
  // use only the lowest element of the FMA*_Int instruction. Such an analysis
  // is not implemented yet, so callers must not request that commute; when the
  // analysis becomes available, this is the right place to call it.
  assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
         "Intrinsic instructions can't commute operand 1");

  // Determine which case this commute is or if it can't be done.
  unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
                                         SrcOpIdx2);
  assert(Case < 3 && "Unexpected case number!");

  // Define the FMA forms mapping array that helps to map input FMA form
  // to output FMA form to preserve the operation semantics after
  // commuting the operands.
  const unsigned Form132Index = 0;
  const unsigned Form213Index = 1;
  const unsigned Form231Index = 2;
  static const unsigned FormMapping[][3] = {
    // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
    // FMA132 A, C, b; ==> FMA231 C, A, b;
    // FMA213 B, A, c; ==> FMA213 A, B, c;
    // FMA231 C, A, b; ==> FMA132 A, C, b;
    { Form231Index, Form213Index, Form132Index },
    // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
    // FMA132 A, c, B; ==> FMA132 B, c, A;
    // FMA213 B, a, C; ==> FMA231 C, a, B;
    // FMA231 C, a, B; ==> FMA213 B, a, C;
    { Form132Index, Form231Index, Form213Index },
    // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
    // FMA132 a, C, B; ==> FMA213 a, B, C;
    // FMA213 b, A, C; ==> FMA132 b, C, A;
    // FMA231 c, A, B; ==> FMA231 c, B, A;
    { Form213Index, Form132Index, Form231Index }
  };

  unsigned FMAForms[3];
  FMAForms[0] = FMA3Group.get132Opcode();
  FMAForms[1] = FMA3Group.get213Opcode();
  FMAForms[2] = FMA3Group.get231Opcode();
  unsigned FormIndex;
  for (FormIndex = 0; FormIndex < 3; FormIndex++)
    if (Opc == FMAForms[FormIndex])
      break;

  // Everything is ready, just adjust the FMA opcode and return it.
  FormIndex = FormMapping[Case][FormIndex];
  return FMAForms[FormIndex];
}

static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
                             unsigned SrcOpIdx2) {
  // Determine which case this commute is or if it can't be done.
  unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
                                         SrcOpIdx2);
  assert(Case < 3 && "Unexpected case value!");

  // For each case we need to swap two pairs of bits in the final immediate.
  static const uint8_t SwapMasks[3][4] = {
    { 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5.
    { 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6.
    { 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6.
  };

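  // e.g. swapping the second and third sources (Case 2) of a ternlog with
  // Imm = 0xCA (dst = src1 ? src2 : src3) exchanges table bits 1<->2 and
  // 5<->6, giving Imm = 0xAC (dst = src1 ? src3 : src2).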
  uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm();
  // Clear out the bits we are swapping.
  uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
                           SwapMasks[Case][2] | SwapMasks[Case][3]);
  // If the immediate had a bit of the pair set, then set the opposite bit.
  if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1];
  if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0];
  if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3];
  if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2];
  MI.getOperand(MI.getNumOperands()-1).setImm(NewImm);
}

// Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
// commuted.
static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
#define VPERM_CASES(Suffix) \
  case X86::VPERMI2##Suffix##128rr: case X86::VPERMT2##Suffix##128rr: \
  case X86::VPERMI2##Suffix##256rr: case X86::VPERMT2##Suffix##256rr: \
  case X86::VPERMI2##Suffix##rr: case X86::VPERMT2##Suffix##rr: \
  case X86::VPERMI2##Suffix##128rm: case X86::VPERMT2##Suffix##128rm: \
  case X86::VPERMI2##Suffix##256rm: case X86::VPERMT2##Suffix##256rm: \
  case X86::VPERMI2##Suffix##rm: case X86::VPERMT2##Suffix##rm: \
  case X86::VPERMI2##Suffix##128rrkz: case X86::VPERMT2##Suffix##128rrkz: \
  case X86::VPERMI2##Suffix##256rrkz: case X86::VPERMT2##Suffix##256rrkz: \
  case X86::VPERMI2##Suffix##rrkz: case X86::VPERMT2##Suffix##rrkz: \
  case X86::VPERMI2##Suffix##128rmkz: case X86::VPERMT2##Suffix##128rmkz: \
  case X86::VPERMI2##Suffix##256rmkz: case X86::VPERMT2##Suffix##256rmkz: \
  case X86::VPERMI2##Suffix##rmkz: case X86::VPERMT2##Suffix##rmkz:

#define VPERM_CASES_BROADCAST(Suffix) \
  VPERM_CASES(Suffix) \
  case X86::VPERMI2##Suffix##128rmb: case X86::VPERMT2##Suffix##128rmb: \
  case X86::VPERMI2##Suffix##256rmb: case X86::VPERMT2##Suffix##256rmb: \
  case X86::VPERMI2##Suffix##rmb: case X86::VPERMT2##Suffix##rmb: \
  case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \
  case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \
  case X86::VPERMI2##Suffix##rmbkz: case X86::VPERMT2##Suffix##rmbkz:

  switch (Opcode) {
  default: return false;
  VPERM_CASES(B)
  VPERM_CASES_BROADCAST(D)
  VPERM_CASES_BROADCAST(PD)
  VPERM_CASES_BROADCAST(PS)
  VPERM_CASES_BROADCAST(Q)
  VPERM_CASES(W)
    return true;
  }
#undef VPERM_CASES_BROADCAST
#undef VPERM_CASES
}

// Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
// from the I opcode to the T opcode and vice versa.
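// VPERMT2* takes its first data table in the tied first operand and the
// index vector second, while VPERMI2* takes the (tied) index first and the
// first data table second; exchanging those two operands therefore turns
// one opcode into the other without changing the computation.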
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
#define VPERM_CASES(Orig, New) \
  case X86::Orig##128rr: return X86::New##128rr; \
  case X86::Orig##128rrkz: return X86::New##128rrkz; \
  case X86::Orig##128rm: return X86::New##128rm; \
  case X86::Orig##128rmkz: return X86::New##128rmkz; \
  case X86::Orig##256rr: return X86::New##256rr; \
  case X86::Orig##256rrkz: return X86::New##256rrkz; \
  case X86::Orig##256rm: return X86::New##256rm; \
  case X86::Orig##256rmkz: return X86::New##256rmkz; \
  case X86::Orig##rr: return X86::New##rr; \
  case X86::Orig##rrkz: return X86::New##rrkz; \
  case X86::Orig##rm: return X86::New##rm; \
  case X86::Orig##rmkz: return X86::New##rmkz;

#define VPERM_CASES_BROADCAST(Orig, New) \
  VPERM_CASES(Orig, New) \
  case X86::Orig##128rmb: return X86::New##128rmb; \
  case X86::Orig##128rmbkz: return X86::New##128rmbkz; \
  case X86::Orig##256rmb: return X86::New##256rmb; \
  case X86::Orig##256rmbkz: return X86::New##256rmbkz; \
  case X86::Orig##rmb: return X86::New##rmb; \
  case X86::Orig##rmbkz: return X86::New##rmbkz;

  switch (Opcode) {
  VPERM_CASES(VPERMI2B, VPERMT2B)
  VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D)
  VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
  VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
  VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q)
  VPERM_CASES(VPERMI2W, VPERMT2W)
  VPERM_CASES(VPERMT2B, VPERMI2B)
  VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D)
  VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
  VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
  VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q)
  VPERM_CASES(VPERMT2W, VPERMI2W)
  }

  llvm_unreachable("Unreachable!");
#undef VPERM_CASES_BROADCAST
#undef VPERM_CASES
}

MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
    unsigned Opc;
    unsigned Size;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
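    // e.g. SHLD32rri8 B, C, 8 computes (B << 8) | (C >> 24), which is
    // exactly SHRD32rri8 C, B, 24 = (C >> 24) | (B << 8).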
    unsigned Amt = MI.getOperand(3).getImm();
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    WorkingMI.getOperand(3).setImm(Size - Amt);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::PFSUBrr:
  case X86::PFSUBRrr: {
    // PFSUB  x, y: x = x - y
    // PFSUBR x, y: x = y - x
    unsigned Opc =
        (X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr);
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::BLENDPDrri:
  case X86::BLENDPSrri:
  case X86::VBLENDPDrri:
  case X86::VBLENDPSrri:
    // If we're optimizing for size, try to use MOVSD/MOVSS.
    if (MI.getParent()->getParent()->getFunction().hasOptSize()) {
      unsigned Mask, Opc;
      switch (MI.getOpcode()) {
      default: llvm_unreachable("Unreachable!");
      case X86::BLENDPDrri: Opc = X86::MOVSDrr; Mask = 0x03; break;
      case X86::BLENDPSrri: Opc = X86::MOVSSrr; Mask = 0x0F; break;
      case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break;
      case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break;
      }
      if ((MI.getOperand(3).getImm() ^ Mask) == 1) {
        auto &WorkingMI = cloneIfNew(MI);
        WorkingMI.setDesc(get(Opc));
        WorkingMI.RemoveOperand(3);
        return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
                                                       /*NewMI=*/false,
                                                       OpIdx1, OpIdx2);
      }
    }
    LLVM_FALLTHROUGH;
  case X86::PBLENDWrri:
  case X86::VBLENDPDYrri:
  case X86::VBLENDPSYrri:
  case X86::VPBLENDDrri:
  case X86::VPBLENDWrri:
  case X86::VPBLENDDYrri:
  case X86::VPBLENDWYrri:{
    int8_t Mask;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::BLENDPDrri: Mask = (int8_t)0x03; break;
    case X86::BLENDPSrri: Mask = (int8_t)0x0F; break;
    case X86::PBLENDWrri: Mask = (int8_t)0xFF; break;
    case X86::VBLENDPDrri: Mask = (int8_t)0x03; break;
    case X86::VBLENDPSrri: Mask = (int8_t)0x0F; break;
    case X86::VBLENDPDYrri: Mask = (int8_t)0x0F; break;
    case X86::VBLENDPSYrri: Mask = (int8_t)0xFF; break;
    case X86::VPBLENDDrri: Mask = (int8_t)0x0F; break;
    case X86::VPBLENDWrri: Mask = (int8_t)0xFF; break;
    case X86::VPBLENDDYrri: Mask = (int8_t)0xFF; break;
    case X86::VPBLENDWYrri: Mask = (int8_t)0xFF; break;
    }
    // Only the least significant bits of Imm are used.
    // Using int8_t to ensure it will be sign-extended to the int64_t that
    // setImm takes in order to match isel behavior.
    int8_t Imm = MI.getOperand(3).getImm() & Mask;
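    // Commuting the two sources inverts the per-lane selection, so the new
    // immediate is Imm ^ Mask. e.g. VBLENDPSrri A, B, 0b1010 takes lanes 1
    // and 3 from B; with the sources swapped the same result needs 0b0101.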
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(Mask ^ Imm);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::INSERTPSrr:
  case X86::VINSERTPSrr:
  case X86::VINSERTPSZrr: {
    unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
    unsigned ZMask = Imm & 15;
    unsigned DstIdx = (Imm >> 4) & 3;
    unsigned SrcIdx = (Imm >> 6) & 3;

    // We can commute insertps if we zero 2 of the elements, the insertion is
    // "inline" and we don't override the insertion with a zero.
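    // e.g. Imm = 0xA9 inserts src[2] into lane 2 and zeroes lanes 0 and 3,
    // keeping only lane 1 of the destination; the commuted form below uses
    // AltImm = 0x59 to insert the other operand's lane 1 instead.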
    if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
        countPopulation(ZMask) == 2) {
      unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15);
      assert(AltIdx < 4 && "Illegal insertion index");
      unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm);
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }
    return nullptr;
  }
  case X86::MOVSDrr:
  case X86::MOVSSrr:
  case X86::VMOVSDrr:
  case X86::VMOVSSrr:{
    // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
    if (Subtarget.hasSSE41()) {
      unsigned Mask, Opc;
      switch (MI.getOpcode()) {
      default: llvm_unreachable("Unreachable!");
      case X86::MOVSDrr: Opc = X86::BLENDPDrri; Mask = 0x02; break;
      case X86::MOVSSrr: Opc = X86::BLENDPSrri; Mask = 0x0E; break;
      case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break;
      case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break;
      }

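      // e.g. MOVSDrr A, B computes { B[0], A[1] }; once the base class swaps
      // the two sources below, BLENDPDrri B, A, 0x02 produces the same
      // { B[0], A[1] }.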
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      WorkingMI.addOperand(MachineOperand::CreateImm(Mask));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }

    // Convert to SHUFPD.
    assert(MI.getOpcode() == X86::MOVSDrr &&
           "Can only commute MOVSDrr without SSE4.1");

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(X86::SHUFPDrri));
    WorkingMI.addOperand(MachineOperand::CreateImm(0x02));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::SHUFPDrri: {
    // Commute to MOVSD.
    assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(X86::MOVSDrr));
    WorkingMI.RemoveOperand(3);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::PCLMULQDQrr:
  case X86::VPCLMULQDQrr:
  case X86::VPCLMULQDQYrr:
  case X86::VPCLMULQDQZrr:
  case X86::VPCLMULQDQZ128rr:
  case X86::VPCLMULQDQZ256rr: {
    // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
    // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned Src1Hi = Imm & 0x01;
    unsigned Src2Hi = Imm & 0x10;
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::VPCMPBZ128rri: case X86::VPCMPUBZ128rri:
  case X86::VPCMPBZ256rri: case X86::VPCMPUBZ256rri:
  case X86::VPCMPBZrri: case X86::VPCMPUBZrri:
  case X86::VPCMPDZ128rri: case X86::VPCMPUDZ128rri:
  case X86::VPCMPDZ256rri: case X86::VPCMPUDZ256rri:
  case X86::VPCMPDZrri: case X86::VPCMPUDZrri:
  case X86::VPCMPQZ128rri: case X86::VPCMPUQZ128rri:
  case X86::VPCMPQZ256rri: case X86::VPCMPUQZ256rri:
  case X86::VPCMPQZrri: case X86::VPCMPUQZrri:
  case X86::VPCMPWZ128rri: case X86::VPCMPUWZ128rri:
  case X86::VPCMPWZ256rri: case X86::VPCMPUWZ256rri:
  case X86::VPCMPWZrri: case X86::VPCMPUWZrri:
  case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik:
  case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik:
  case X86::VPCMPBZrrik: case X86::VPCMPUBZrrik:
  case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik:
  case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik:
  case X86::VPCMPDZrrik: case X86::VPCMPUDZrrik:
  case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik:
  case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik:
  case X86::VPCMPQZrrik: case X86::VPCMPUQZrrik:
  case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik:
  case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik:
  case X86::VPCMPWZrrik: case X86::VPCMPUWZrrik: {
    // Flip comparison mode immediate (if necessary).
    unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7;
    Imm = X86::getSwappedVPCMPImm(Imm);
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::VPCOMBri: case X86::VPCOMUBri:
  case X86::VPCOMDri: case X86::VPCOMUDri:
  case X86::VPCOMQri: case X86::VPCOMUQri:
  case X86::VPCOMWri: case X86::VPCOMUWri: {
    // Flip comparison mode immediate (if necessary).
    unsigned Imm = MI.getOperand(3).getImm() & 0x7;
    Imm = X86::getSwappedVPCOMImm(Imm);
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(Imm);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::VCMPSDZrr:
  case X86::VCMPSSZrr:
  case X86::VCMPPDZrri:
  case X86::VCMPPSZrri:
  case X86::VCMPPDZ128rri:
  case X86::VCMPPSZ128rri:
  case X86::VCMPPDZ256rri:
  case X86::VCMPPSZ256rri:
  case X86::VCMPPDZrrik:
  case X86::VCMPPSZrrik:
  case X86::VCMPPDZ128rrik:
  case X86::VCMPPSZ128rrik:
  case X86::VCMPPDZ256rrik:
  case X86::VCMPPSZ256rrik: {
    unsigned Imm =
        MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f;
    Imm = X86::getSwappedVCMPImm(Imm);
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(MI.getNumExplicitOperands() - 1).setImm(Imm);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::VPERM2F128rr:
  case X86::VPERM2I128rr: {
    // Flip permute source immediate.
    // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
    // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
    int8_t Imm = MI.getOperand(3).getImm() & 0xFF;
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(Imm ^ 0x22);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::MOVHLPSrr:
  case X86::UNPCKHPDrr:
  case X86::VMOVHLPSrr:
  case X86::VUNPCKHPDrr:
  case X86::VMOVHLPSZrr:
  case X86::VUNPCKHPDZ128rr: {
    assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");

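    // MOVHLPS A, B = { B[hi64], A[hi64] } while UNPCKHPD A, B =
    // { A[hi64], B[hi64] }, so swapping the two sources turns one
    // instruction into the other.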
    unsigned Opc = MI.getOpcode();
    switch (Opc) {
    default: llvm_unreachable("Unreachable!");
    case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break;
    case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break;
    case X86::VMOVHLPSrr: Opc = X86::VUNPCKHPDrr; break;
    case X86::VUNPCKHPDrr: Opc = X86::VMOVHLPSrr; break;
    case X86::VMOVHLPSZrr: Opc = X86::VUNPCKHPDZ128rr; break;
    case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr; break;
    }
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: {
    auto &WorkingMI = cloneIfNew(MI);
    unsigned OpNo = MI.getDesc().getNumOperands() - 1;
    X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm());
    WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi:
  case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi:
  case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi:
  case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi:
  case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi:
  case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi:
  case X86::VPTERNLOGDZrrik:
  case X86::VPTERNLOGDZ128rrik:
  case X86::VPTERNLOGDZ256rrik:
  case X86::VPTERNLOGQZrrik:
  case X86::VPTERNLOGQZ128rrik:
  case X86::VPTERNLOGQZ256rrik:
  case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz:
  case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
  case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
  case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz:
  case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
  case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
  case X86::VPTERNLOGDZ128rmbi:
  case X86::VPTERNLOGDZ256rmbi:
  case X86::VPTERNLOGDZrmbi:
  case X86::VPTERNLOGQZ128rmbi:
  case X86::VPTERNLOGQZ256rmbi:
  case X86::VPTERNLOGQZrmbi:
  case X86::VPTERNLOGDZ128rmbikz:
  case X86::VPTERNLOGDZ256rmbikz:
  case X86::VPTERNLOGDZrmbikz:
  case X86::VPTERNLOGQZ128rmbikz:
  case X86::VPTERNLOGQZ256rmbikz:
  case X86::VPTERNLOGQZrmbikz: {
    auto &WorkingMI = cloneIfNew(MI);
    commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  default: {
    if (isCommutableVPERMV3Instruction(MI.getOpcode())) {
      unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode());
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }

    const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
                                                      MI.getDesc().TSFlags);
    if (FMA3Group) {
      unsigned Opc =
          getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group);
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }

    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  }
  }
}

bool
X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2,
                                            bool IsIntrinsic) const {
  uint64_t TSFlags = MI.getDesc().TSFlags;

  unsigned FirstCommutableVecOp = 1;
  unsigned LastCommutableVecOp = 3;
  unsigned KMaskOp = -1U;
  if (X86II::isKMasked(TSFlags)) {
    // For k-zero-masked operations it is OK to commute the first vector
    // operand, unless this is an intrinsic instruction.
    // For regular k-masked operations a conservative choice is made, as the
    // elements of the first vector operand for which the corresponding bit
    // in the k-mask operand is set to 0 are copied to the result of the
    // instruction.
    // TODO/FIXME: The commute may still be legal if it is known that the
    // k-mask operand is set to either all ones or all zeroes.
    // It is also OK to commute the 1st operand if all users of MI use only
    // the elements enabled by the k-mask operand. For example,
    //   v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i]
    //                                      //        : v1[i];
    //   VMOVAPSZmrk <mem_addr>, k, v4;     // this is the ONLY user of v4 ->
    //                                      // OK to commute v1 in FMADD213PSZrk.

    // The k-mask operand has index = 2 for masked and zero-masked operations.
    KMaskOp = 2;

    // The operand with index = 1 is used as a source for those elements for
    // which the corresponding bit in the k-mask is set to 0.
    if (X86II::isKMergeMasked(TSFlags) || IsIntrinsic)
      FirstCommutableVecOp = 3;

    LastCommutableVecOp++;
  } else if (IsIntrinsic) {
    // Commuting the first operand of an intrinsic instruction isn't possible
    // unless we can prove that only the lowest element of the result is used.
    FirstCommutableVecOp = 2;
  }

  if (isMem(MI, LastCommutableVecOp))
    LastCommutableVecOp--;

  // Only operands in the range [FirstCommutableVecOp, LastCommutableVecOp]
  // are commutable. The value 'CommuteAnyOperandIndex' is also valid here,
  // as it means that the operand is not specified/fixed.
  if (SrcOpIdx1 != CommuteAnyOperandIndex &&
      (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
       SrcOpIdx1 == KMaskOp))
    return false;
  if (SrcOpIdx2 != CommuteAnyOperandIndex &&
      (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
       SrcOpIdx2 == KMaskOp))
    return false;

  // Look for two different register operands assumed to be commutable
  // regardless of the FMA opcode. The FMA opcode is adjusted later.
  if (SrcOpIdx1 == CommuteAnyOperandIndex ||
      SrcOpIdx2 == CommuteAnyOperandIndex) {
    unsigned CommutableOpIdx2 = SrcOpIdx2;

    // At least one of the operands to be commuted is not specified and this
    // method is free to choose appropriate commutable operands.
    if (SrcOpIdx1 == SrcOpIdx2)
      // Neither operand is fixed. By default set one of the commutable
      // operands to the last register operand of the instruction.
      CommutableOpIdx2 = LastCommutableVecOp;
    else if (SrcOpIdx2 == CommuteAnyOperandIndex)
      // Only one of the operands is not fixed.
      CommutableOpIdx2 = SrcOpIdx1;

    // CommutableOpIdx2 is well defined now. Let's choose another commutable
    // operand and assign its index to CommutableOpIdx1.
    Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();

    unsigned CommutableOpIdx1;
    for (CommutableOpIdx1 = LastCommutableVecOp;
         CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
      // Just ignore and skip the k-mask operand.
      if (CommutableOpIdx1 == KMaskOp)
        continue;

      // The commuted operands must have different registers.
      // Otherwise, the commute transformation does not change anything and
      // would be useless.
      if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
        break;
    }

    // No appropriate commutable operands were found.
    if (CommutableOpIdx1 < FirstCommutableVecOp)
      return false;

    // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2
    // to return those values.
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                              CommutableOpIdx1, CommutableOpIdx2))
      return false;
  }

  return true;
}

bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                         unsigned &SrcOpIdx1,
                                         unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case X86::CMPSDrr:
  case X86::CMPSSrr:
  case X86::CMPPDrri:
  case X86::CMPPSrri:
  case X86::VCMPSDrr:
  case X86::VCMPSSrr:
  case X86::VCMPPDrri:
  case X86::VCMPPSrri:
  case X86::VCMPPDYrri:
  case X86::VCMPPSYrri:
  case X86::VCMPSDZrr:
  case X86::VCMPSSZrr:
  case X86::VCMPPDZrri:
  case X86::VCMPPSZrri:
  case X86::VCMPPDZ128rri:
  case X86::VCMPPSZ128rri:
  case X86::VCMPPDZ256rri:
  case X86::VCMPPSZ256rri:
  case X86::VCMPPDZrrik:
  case X86::VCMPPSZrrik:
  case X86::VCMPPDZ128rrik:
  case X86::VCMPPSZ128rrik:
  case X86::VCMPPDZ256rrik:
  case X86::VCMPPSZ256rrik: {
    unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0;

    // Floating-point comparisons can be safely commuted for the
    // Ordered/Unordered/Equal/NotEqual tests; these predicates are symmetric
    // under operand exchange.
    unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7;
    switch (Imm) {
    default:
      // EVEX versions can always be commuted; the immediate is rewritten by
      // getSwappedVCMPImm when the operands are swapped.
      if ((Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX)
        break;
      return false;
    case 0x00: // EQUAL
    case 0x03: // UNORDERED
    case 0x04: // NOT EQUAL
    case 0x07: // ORDERED
      break;
    }

    // The indices of the commutable operands are 1 and 2 (or 2 and 3
    // when masked).
    // Assign them to the returned operand indices here.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
                                2 + OpOffset);
  }
  case X86::MOVSSrr:
    // X86::MOVSDrr is always commutable. MOVSS is only commutable if we can
    // form an SSE4.1 blend. We assume VMOVSSrr/VMOVSDrr is always commutable
    // since AVX implies SSE4.1.
    if (Subtarget.hasSSE41())
      return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
    return false;
  case X86::SHUFPDrri:
    // We can commute this to MOVSD.
    if (MI.getOperand(3).getImm() == 0x02)
      return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
    return false;
  case X86::MOVHLPSrr:
  case X86::UNPCKHPDrr:
  case X86::VMOVHLPSrr:
  case X86::VUNPCKHPDrr:
  case X86::VMOVHLPSZrr:
  case X86::VUNPCKHPDZ128rr:
    if (Subtarget.hasSSE2())
      return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
    return false;
  case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi:
  case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi:
  case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi:
  case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi:
  case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi:
  case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi:
  case X86::VPTERNLOGDZrrik:
  case X86::VPTERNLOGDZ128rrik:
  case X86::VPTERNLOGDZ256rrik:
  case X86::VPTERNLOGQZrrik:
  case X86::VPTERNLOGQZ128rrik:
  case X86::VPTERNLOGQZ256rrik:
  case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz:
  case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
  case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
  case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz:
  case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
  case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
  case X86::VPTERNLOGDZ128rmbi:
  case X86::VPTERNLOGDZ256rmbi:
  case X86::VPTERNLOGDZrmbi:
  case X86::VPTERNLOGQZ128rmbi:
  case X86::VPTERNLOGQZ256rmbi:
  case X86::VPTERNLOGQZrmbi:
  case X86::VPTERNLOGDZ128rmbikz:
  case X86::VPTERNLOGDZ256rmbikz:
  case X86::VPTERNLOGDZrmbikz:
  case X86::VPTERNLOGQZ128rmbikz:
  case X86::VPTERNLOGQZ256rmbikz:
  case X86::VPTERNLOGQZrmbikz:
    return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
  case X86::VPDPWSSDZ128r:
  case X86::VPDPWSSDZ128rk:
  case X86::VPDPWSSDZ128rkz:
  case X86::VPDPWSSDZ256r:
  case X86::VPDPWSSDZ256rk:
  case X86::VPDPWSSDZ256rkz:
  case X86::VPDPWSSDZr:
  case X86::VPDPWSSDZrk:
  case X86::VPDPWSSDZrkz:
  case X86::VPDPWSSDSZ128r:
  case X86::VPDPWSSDSZ128rk:
  case X86::VPDPWSSDSZ128rkz:
  case X86::VPDPWSSDSZ256r:
  case X86::VPDPWSSDSZ256rk:
  case X86::VPDPWSSDSZ256rkz:
  case X86::VPDPWSSDSZr:
  case X86::VPDPWSSDSZrk:
  case X86::VPDPWSSDSZrkz:
  case X86::VPMADD52HUQZ128r:
  case X86::VPMADD52HUQZ128rk:
  case X86::VPMADD52HUQZ128rkz:
  case X86::VPMADD52HUQZ256r:
  case X86::VPMADD52HUQZ256rk:
  case X86::VPMADD52HUQZ256rkz:
  case X86::VPMADD52HUQZr:
  case X86::VPMADD52HUQZrk:
  case X86::VPMADD52HUQZrkz:
  case X86::VPMADD52LUQZ128r:
  case X86::VPMADD52LUQZ128rk:
  case X86::VPMADD52LUQZ128rkz:
  case X86::VPMADD52LUQZ256r:
  case X86::VPMADD52LUQZ256rk:
  case X86::VPMADD52LUQZ256rkz:
  case X86::VPMADD52LUQZr:
  case X86::VPMADD52LUQZrk:
  case X86::VPMADD52LUQZrkz: {
    unsigned CommutableOpIdx1 = 2;
    unsigned CommutableOpIdx2 = 3;
    if (X86II::isKMasked(Desc.TSFlags)) {
      // Skip the mask register.
      ++CommutableOpIdx1;
      ++CommutableOpIdx2;
    }
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                              CommutableOpIdx1, CommutableOpIdx2))
      return false;
    if (!MI.getOperand(SrcOpIdx1).isReg() ||
        !MI.getOperand(SrcOpIdx2).isReg())
      // No idea.
      return false;
    return true;
  }

  default:
    const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
                                                      MI.getDesc().TSFlags);
    if (FMA3Group)
      return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2,
                                           FMA3Group->isIntrinsic());

    // Handle masked instructions, since we need to skip over the mask input
    // and the preserved input.
    if (X86II::isKMasked(Desc.TSFlags)) {
      // First assume that the first input is the mask operand and skip past it.
      unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1;
      unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2;
      // Check if the first input is tied. If there isn't one, then we only
      // need to skip the mask operand, which we did above.
      if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(),
                                             MCOI::TIED_TO) != -1)) {
        // If this is a zero-masking instruction with a tied operand, we need
        // to move the first index back to the first input, since this must
        // be a 3-input instruction and we want the first two non-mask inputs.
        // Otherwise this is a 2-input instruction with a preserved input and
        // mask, so we need to move the indices to skip one more input.
        if (X86II::isKMergeMasked(Desc.TSFlags)) {
          ++CommutableOpIdx1;
          ++CommutableOpIdx2;
        } else {
          --CommutableOpIdx1;
        }
      }

      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                                CommutableOpIdx1, CommutableOpIdx2))
        return false;

      if (!MI.getOperand(SrcOpIdx1).isReg() ||
          !MI.getOperand(SrcOpIdx2).isReg())
        // No idea.
        return false;
      return true;
    }

    return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
  }
  return false;
}

X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return X86::COND_INVALID;
  case X86::JCC_1:
    return static_cast<X86::CondCode>(
        MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
  }
}

/// Return condition code of a SETCC opcode.
X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return X86::COND_INVALID;
  case X86::SETCCr: case X86::SETCCm:
    return static_cast<X86::CondCode>(
        MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
  }
}

/// Return condition code of a CMov opcode.
X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return X86::COND_INVALID;
  case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr:
  case X86::CMOV16rm: case X86::CMOV32rm: case X86::CMOV64rm:
    return static_cast<X86::CondCode>(
        MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
  }
}

/// Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Illegal condition code!");
  case X86::COND_E: return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L: return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G: return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B: return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A: return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S: return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P: return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O: return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  case X86::COND_NE_OR_P: return X86::COND_E_AND_NP;
  case X86::COND_E_AND_NP: return X86::COND_NE_OR_P;
  }
}

/// Assuming the flags are set by MI(a,b), return the condition code if we
/// modify the instructions such that flags are set by MI(b,a).
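/// e.g. "CMP a, b; COND_L" tests a < b; once the comparison is rewritten as
/// "CMP b, a", the equivalent test is COND_G (b > a).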
static X86::CondCode getSwappedCondition(X86::CondCode CC) {
  switch (CC) {
  default: return X86::COND_INVALID;
  case X86::COND_E: return X86::COND_E;
  case X86::COND_NE: return X86::COND_NE;
  case X86::COND_L: return X86::COND_G;
  case X86::COND_LE: return X86::COND_GE;
  case X86::COND_G: return X86::COND_L;
  case X86::COND_GE: return X86::COND_LE;
  case X86::COND_B: return X86::COND_A;
  case X86::COND_BE: return X86::COND_AE;
  case X86::COND_A: return X86::COND_B;
  case X86::COND_AE: return X86::COND_BE;
  }
}

std::pair<X86::CondCode, bool>
X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
  X86::CondCode CC = X86::COND_INVALID;
  bool NeedSwap = false;
  switch (Predicate) {
  default: break;
  // Floating-point Predicates
  case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
  case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH;
  case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
  case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH;
  case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
  case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH;
  case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
  case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH;
  case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
  case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
  case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
  case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
  case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH;
  case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;

  // Integer Predicates
  case CmpInst::ICMP_EQ: CC = X86::COND_E; break;
  case CmpInst::ICMP_NE: CC = X86::COND_NE; break;
  case CmpInst::ICMP_UGT: CC = X86::COND_A; break;
  case CmpInst::ICMP_UGE: CC = X86::COND_AE; break;
  case CmpInst::ICMP_ULT: CC = X86::COND_B; break;
  case CmpInst::ICMP_ULE: CC = X86::COND_BE; break;
  case CmpInst::ICMP_SGT: CC = X86::COND_G; break;
  case CmpInst::ICMP_SGE: CC = X86::COND_GE; break;
  case CmpInst::ICMP_SLT: CC = X86::COND_L; break;
  case CmpInst::ICMP_SLE: CC = X86::COND_LE; break;
  }

  return std::make_pair(CC, NeedSwap);
}

/// Return a setcc opcode based on whether it has a memory operand.
unsigned X86::getSETOpc(bool HasMemoryOperand) {
  return HasMemoryOperand ? X86::SETCCm : X86::SETCCr;
}

/// Return a cmov opcode for the given register size in bytes, and operand type.
unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) {
  switch(RegBytes) {
  default: llvm_unreachable("Illegal register size!");
  case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr;
  case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr;
  case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr;
  }
}

/// Get the VPCMP immediate for the given condition.
unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unexpected SETCC condition");
  case ISD::SETNE: return 4;
  case ISD::SETEQ: return 0;
  case ISD::SETULT:
  case ISD::SETLT: return 1;
  case ISD::SETUGT:
  case ISD::SETGT: return 6;
  case ISD::SETUGE:
  case ISD::SETGE: return 5;
  case ISD::SETULE:
  case ISD::SETLE: return 2;
  }
}

/// Get the VPCMP immediate if the operands are swapped.
unsigned X86::getSwappedVPCMPImm(unsigned Imm) {
  switch (Imm) {
  default: llvm_unreachable("Unreachable!");
  case 0x01: Imm = 0x06; break; // LT -> NLE
  case 0x02: Imm = 0x05; break; // LE -> NLT
  case 0x05: Imm = 0x02; break; // NLT -> LE
  case 0x06: Imm = 0x01; break; // NLE -> LT
  case 0x00: // EQ
  case 0x03: // FALSE
  case 0x04: // NE
  case 0x07: // TRUE
    break;
  }

  return Imm;
}

/// Get the VPCOM immediate if the operands are swapped.
unsigned X86::getSwappedVPCOMImm(unsigned Imm) {
  switch (Imm) {
  default: llvm_unreachable("Unreachable!");
  case 0x00: Imm = 0x02; break; // LT -> GT
  case 0x01: Imm = 0x03; break; // LE -> GE
  case 0x02: Imm = 0x00; break; // GT -> LT
  case 0x03: Imm = 0x01; break; // GE -> LE
  case 0x04: // EQ
  case 0x05: // NE
  case 0x06: // FALSE
  case 0x07: // TRUE
    break;
  }

  return Imm;
}

/// Get the VCMP immediate if the operands are swapped.
unsigned X86::getSwappedVCMPImm(unsigned Imm) {
  // Only need the lower 2 bits to distinguish.
  switch (Imm & 0x3) {
  default: llvm_unreachable("Unreachable!");
  case 0x00: case 0x03:
    // EQ/NE/TRUE/FALSE/ORD/UNORD don't change immediate when commuted.
    break;
  case 0x01: case 0x02:
    // Need to toggle bits 3:0. Bit 4 stays the same.
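    // e.g. LT_OS (0x01) <-> GT_OS (0x0E) and LE_OS (0x02) <-> GE_OS (0x0D).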
    Imm ^= 0xf;
    break;
  }

  return Imm;
}

bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
    return true;
  default:
    return false;
  }
}

bool X86InstrInfo::canMakeTailCallConditional(
    SmallVectorImpl<MachineOperand> &BranchCond,
    const MachineInstr &TailCall) const {
  if (TailCall.getOpcode() != X86::TCRETURNdi &&
      TailCall.getOpcode() != X86::TCRETURNdi64) {
    // Only direct calls can be done with a conditional branch.
    return false;
  }

  const MachineFunction *MF = TailCall.getParent()->getParent();
  if (Subtarget.isTargetWin64() && MF->hasWinCFI()) {
    // Conditional tail calls confuse the Win64 unwinder.
    return false;
  }

  assert(BranchCond.size() == 1);
  if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
    // Can't make a conditional tail call with this condition.
    return false;
  }

  const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
  if (X86FI->getTCReturnAddrDelta() != 0 ||
      TailCall.getOperand(1).getImm() != 0) {
    // A conditional tail call cannot do any stack adjustment.
    return false;
  }

  return true;
}

void X86InstrInfo::replaceBranchWithTailCall(
    MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
    const MachineInstr &TailCall) const {
  assert(canMakeTailCallConditional(BranchCond, TailCall));

  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (!I->isBranch())
      assert(0 && "Can't find the branch to replace!");

    X86::CondCode CC = X86::getCondFromBranch(*I);
    assert(BranchCond.size() == 1);
    if (CC != BranchCond[0].getImm())
      continue;

    break;
  }

  unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
                                                         : X86::TCRETURNdi64cc;

  auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
  MIB->addOperand(TailCall.getOperand(0)); // Destination.
  MIB.addImm(0);                           // Stack offset (not used).
  MIB->addOperand(BranchCond[0]);          // Condition.
  MIB.copyImplicitOps(TailCall);           // Regmask and (imp-used) parameters.

  // Add implicit uses and defs of all live regs potentially clobbered by the
  // call. This way they still appear live across the call.
  LivePhysRegs LiveRegs(getRegisterInfo());
  LiveRegs.addLiveOuts(MBB);
  SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers;
  LiveRegs.stepForward(*MIB, Clobbers);
  for (const auto &C : Clobbers) {
    MIB.addReg(C.first, RegState::Implicit);
    MIB.addReg(C.first, RegState::Implicit | RegState::Define);
  }

  I->eraseFromParent();
}

// Given an MBB and its TBB, find the FBB which was a fallthrough MBB (it may
// not be a fallthrough MBB now due to layout changes). Return nullptr if the
// fallthrough MBB cannot be identified.
static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
                                            MachineBasicBlock *TBB) {
  // Look for non-EHPad successors other than TBB. If we find exactly one, it
  // is the fallthrough MBB. If we find zero, then TBB is both the target MBB
  // and the fallthrough MBB. If we find more than one, we cannot identify the
  // fallthrough MBB and should return nullptr.
  MachineBasicBlock *FallthroughBB = nullptr;
  for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) {
    if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB))
      continue;
    // Return a nullptr if we found more than one fallthrough successor.
    if (FallthroughBB && FallthroughBB != TBB)
      return nullptr;
    FallthroughBB = *SI;
  }
  return FallthroughBB;
}

AnalyzeBranchImpl(MachineBasicBlock & MBB,MachineBasicBlock * & TBB,MachineBasicBlock * & FBB,SmallVectorImpl<MachineOperand> & Cond,SmallVectorImpl<MachineInstr * > & CondBranches,bool AllowModify) const2976 bool X86InstrInfo::AnalyzeBranchImpl(
2977 MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
2978 SmallVectorImpl<MachineOperand> &Cond,
2979 SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {
2980
2981 // Start from the bottom of the block and work up, examining the
2982 // terminator instructions.
2983 MachineBasicBlock::iterator I = MBB.end();
2984 MachineBasicBlock::iterator UnCondBrIter = MBB.end();
2985 while (I != MBB.begin()) {
2986 --I;
2987 if (I->isDebugInstr())
2988 continue;
2989
2990 // Working from the bottom, when we see a non-terminator instruction, we're
2991 // done.
2992 if (!isUnpredicatedTerminator(*I))
2993 break;
2994
2995 // A terminator that isn't a branch can't easily be handled by this
2996 // analysis.
2997 if (!I->isBranch())
2998 return true;
2999
3000 // Handle unconditional branches.
3001 if (I->getOpcode() == X86::JMP_1) {
3002 UnCondBrIter = I;
3003
3004 if (!AllowModify) {
3005 TBB = I->getOperand(0).getMBB();
3006 continue;
3007 }
3008
3009 // If the block has any instructions after a JMP, delete them.
3010 while (std::next(I) != MBB.end())
3011 std::next(I)->eraseFromParent();
3012
3013 Cond.clear();
3014 FBB = nullptr;
3015
3016 // Delete the JMP if it's equivalent to a fall-through.
3017 if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
3018 TBB = nullptr;
3019 I->eraseFromParent();
3020 I = MBB.end();
3021 UnCondBrIter = MBB.end();
3022 continue;
3023 }
3024
3025 // TBB is used to indicate the unconditional destination.
3026 TBB = I->getOperand(0).getMBB();
3027 continue;
3028 }
3029
3030 // Handle conditional branches.
3031 X86::CondCode BranchCode = X86::getCondFromBranch(*I);
3032 if (BranchCode == X86::COND_INVALID)
3033 return true; // Can't handle indirect branch.
3034
3035 // In practice we should never have an undef eflags operand, if we do
3036 // abort here as we are not prepared to preserve the flag.
3037 if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef())
3038 return true;
3039
3040 // Working from the bottom, handle the first conditional branch.
3041 if (Cond.empty()) {
3042 MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
3043 if (AllowModify && UnCondBrIter != MBB.end() &&
3044 MBB.isLayoutSuccessor(TargetBB)) {
3045 // If we can modify the code and it ends in something like:
3046 //
3047 // jCC L1
3048 // jmp L2
3049 // L1:
3050 // ...
3051 // L2:
3052 //
3053 // Then we can change this to:
3054 //
3055 // jnCC L2
3056 // L1:
3057 // ...
3058 // L2:
3059 //
3060 // Which is a bit more efficient.
3061 // We conditionally jump to the fall-through block.
3062 BranchCode = GetOppositeBranchCondition(BranchCode);
3063 MachineBasicBlock::iterator OldInst = I;
3064
3065 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JCC_1))
3066 .addMBB(UnCondBrIter->getOperand(0).getMBB())
3067 .addImm(BranchCode);
3068 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1))
3069 .addMBB(TargetBB);
3070
3071 OldInst->eraseFromParent();
3072 UnCondBrIter->eraseFromParent();
3073
3074 // Restart the analysis.
3075 UnCondBrIter = MBB.end();
3076 I = MBB.end();
3077 continue;
3078 }
3079
3080 FBB = TBB;
3081 TBB = I->getOperand(0).getMBB();
3082 Cond.push_back(MachineOperand::CreateImm(BranchCode));
3083 CondBranches.push_back(&*I);
3084 continue;
3085 }
3086
3087 // Handle subsequent conditional branches. Only handle the case where all
3088 // conditional branches branch to the same destination and their condition
3089 // opcodes fit one of the special multi-branch idioms.
3090 assert(Cond.size() == 1);
3091 assert(TBB);
3092
3093 // If the conditions are the same, we can leave them alone.
3094 X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
3095 auto NewTBB = I->getOperand(0).getMBB();
3096 if (OldBranchCode == BranchCode && TBB == NewTBB)
3097 continue;
3098
3099 // If they differ, see if they fit one of the known patterns. Theoretically,
3100 // we could handle more patterns here, but we shouldn't expect to see them
3101 // if instruction selection has done a reasonable job.
3102 if (TBB == NewTBB &&
3103 ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
3104 (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
3105 BranchCode = X86::COND_NE_OR_P;
3106 } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
3107 (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
3108 if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
3109 return true;
3110
3111 // X86::COND_E_AND_NP usually has two different branch destinations.
3112 //
3113 // JP B1
3114 // JE B2
3115 // JMP B1
3116 // B1:
3117 // B2:
3118 //
3119 // Here this condition branches to B2 only if NP && E. It has another
3120 // equivalent form:
3121 //
3122 // JNE B1
3123 // JNP B2
3124 // JMP B1
3125 // B1:
3126 // B2:
3127 //
3128 // Similarly, it branches to B2 only if E && NP. That is why this condition
3129 // is named COND_E_AND_NP.
3130 BranchCode = X86::COND_E_AND_NP;
3131 } else
3132 return true;
3133
3134 // Update the MachineOperand.
3135 Cond[0].setImm(BranchCode);
3136 CondBranches.push_back(&*I);
3137 }
3138
3139 return false;
3140 }
3141
3142 bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
3143 MachineBasicBlock *&TBB,
3144 MachineBasicBlock *&FBB,
3145 SmallVectorImpl<MachineOperand> &Cond,
3146 bool AllowModify) const {
3147 SmallVector<MachineInstr *, 4> CondBranches;
3148 return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
3149 }
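
// Illustrative use of the analyzeBranch contract (a sketch, not called from
// here): a pass inspecting a block's terminator structure might do:
//
//   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
//   SmallVector<MachineOperand, 4> Cond;
//   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false)) {
//     // Cond.empty(): unconditional branch (or fall-through) to TBB.
//     // Cond.size() == 1: conditional branch to TBB; otherwise control
//     // flows to FBB, or to the layout successor when FBB is null.
//   }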
3150
3151 bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
3152 MachineBranchPredicate &MBP,
3153 bool AllowModify) const {
3154 using namespace std::placeholders;
3155
3156 SmallVector<MachineOperand, 4> Cond;
3157 SmallVector<MachineInstr *, 4> CondBranches;
3158 if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
3159 AllowModify))
3160 return true;
3161
3162 if (Cond.size() != 1)
3163 return true;
3164
3165 assert(MBP.TrueDest && "expected!");
3166
3167 if (!MBP.FalseDest)
3168 MBP.FalseDest = MBB.getNextNode();
3169
3170 const TargetRegisterInfo *TRI = &getRegisterInfo();
3171
3172 MachineInstr *ConditionDef = nullptr;
3173 bool SingleUseCondition = true;
3174
3175 for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) {
3176 if (I->modifiesRegister(X86::EFLAGS, TRI)) {
3177 ConditionDef = &*I;
3178 break;
3179 }
3180
3181 if (I->readsRegister(X86::EFLAGS, TRI))
3182 SingleUseCondition = false;
3183 }
3184
3185 if (!ConditionDef)
3186 return true;
3187
3188 if (SingleUseCondition) {
3189 for (auto *Succ : MBB.successors())
3190 if (Succ->isLiveIn(X86::EFLAGS))
3191 SingleUseCondition = false;
3192 }
3193
3194 MBP.ConditionDef = ConditionDef;
3195 MBP.SingleUseCondition = SingleUseCondition;
3196
3197 // Currently we only recognize the simple pattern:
3198 //
3199 // test %reg, %reg
3200 // je %label
3201 //
3202 const unsigned TestOpcode =
3203 Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
3204
3205 if (ConditionDef->getOpcode() == TestOpcode &&
3206 ConditionDef->getNumOperands() == 3 &&
3207 ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
3208 (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) {
3209 MBP.LHS = ConditionDef->getOperand(0);
3210 MBP.RHS = MachineOperand::CreateImm(0);
3211 MBP.Predicate = Cond[0].getImm() == X86::COND_NE
3212 ? MachineBranchPredicate::PRED_NE
3213 : MachineBranchPredicate::PRED_EQ;
3214 return false;
3215 }
3216
3217 return true;
3218 }
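
// For illustration, the pattern recognized above looks roughly like this in
// MIR (register and block names are made up):
//
//   TEST64rr %reg, %reg, implicit-def $eflags
//   JCC_1 %bb.true, 4, implicit $eflags    ; 4 encodes X86::COND_E
//
// which is reported as the predicate "%reg == 0" (PRED_EQ), with TrueDest
// and FalseDest filled in from the branch analysis.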
3219
3220 unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
3221 int *BytesRemoved) const {
3222 assert(!BytesRemoved && "code size not handled");
3223
3224 MachineBasicBlock::iterator I = MBB.end();
3225 unsigned Count = 0;
3226
3227 while (I != MBB.begin()) {
3228 --I;
3229 if (I->isDebugInstr())
3230 continue;
3231 if (I->getOpcode() != X86::JMP_1 &&
3232 X86::getCondFromBranch(*I) == X86::COND_INVALID)
3233 break;
3234 // Remove the branch.
3235 I->eraseFromParent();
3236 I = MBB.end();
3237 ++Count;
3238 }
3239
3240 return Count;
3241 }
3242
3243 unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
3244 MachineBasicBlock *TBB,
3245 MachineBasicBlock *FBB,
3246 ArrayRef<MachineOperand> Cond,
3247 const DebugLoc &DL,
3248 int *BytesAdded) const {
3249 // Shouldn't be a fall through.
3250 assert(TBB && "insertBranch must not be told to insert a fallthrough");
3251 assert((Cond.size() == 1 || Cond.size() == 0) &&
3252 "X86 branch conditions have one component!");
3253 assert(!BytesAdded && "code size not handled");
3254
3255 if (Cond.empty()) {
3256 // Unconditional branch?
3257 assert(!FBB && "Unconditional branch with multiple successors!");
3258 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
3259 return 1;
3260 }
3261
3262 // If FBB is null, it is implied to be a fall-through block.
3263 bool FallThru = FBB == nullptr;
3264
3265 // Conditional branch.
3266 unsigned Count = 0;
3267 X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
3268 switch (CC) {
3269 case X86::COND_NE_OR_P:
3270 // Synthesize NE_OR_P with two branches.
3271 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE);
3272 ++Count;
3273 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P);
3274 ++Count;
3275 break;
3276 case X86::COND_E_AND_NP:
3277 // Use the next block of MBB as FBB if it is null.
3278 if (FBB == nullptr) {
3279 FBB = getFallThroughMBB(&MBB, TBB);
3280 assert(FBB && "MBB cannot be the last block in function when the false "
3281 "body is a fall-through.");
3282 }
3283 // Synthesize COND_E_AND_NP with two branches.
3284 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE);
3285 ++Count;
3286 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP);
3287 ++Count;
3288 break;
3289 default: {
3290 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC);
3291 ++Count;
3292 }
3293 }
3294 if (!FallThru) {
3295 // Two-way conditional branch. Insert the second branch.
3296 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
3297 ++Count;
3298 }
3299 return Count;
3300 }
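
// As a sketch of the synthesized code: with TBB = L1 and FBB = L2,
// COND_NE_OR_P emits
//   jne L1
//   jp  L1
//   jmp L2
// while COND_E_AND_NP emits
//   jne L2
//   jnp L1
//   jmp L2
// (the trailing jmp is omitted when FBB is a fall-through).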
3301
3302 bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
3303 ArrayRef<MachineOperand> Cond,
3304 Register DstReg, Register TrueReg,
3305 Register FalseReg, int &CondCycles,
3306 int &TrueCycles, int &FalseCycles) const {
3307 // Not all subtargets have cmov instructions.
3308 if (!Subtarget.hasCMov())
3309 return false;
3310 if (Cond.size() != 1)
3311 return false;
3312 // We cannot do the composite conditions, at least not in SSA form.
3313 if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND)
3314 return false;
3315
3316 // Check register classes.
3317 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3318 const TargetRegisterClass *RC =
3319 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
3320 if (!RC)
3321 return false;
3322
3323 // We have cmov instructions for 16-, 32-, and 64-bit general-purpose registers.
3324 if (X86::GR16RegClass.hasSubClassEq(RC) ||
3325 X86::GR32RegClass.hasSubClassEq(RC) ||
3326 X86::GR64RegClass.hasSubClassEq(RC)) {
3327 // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
3328 // Bridge. Probably Ivy Bridge as well.
3329 CondCycles = 2;
3330 TrueCycles = 2;
3331 FalseCycles = 2;
3332 return true;
3333 }
3334
3335 // Can't do vectors.
3336 return false;
3337 }
3338
3339 void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
3340 MachineBasicBlock::iterator I,
3341 const DebugLoc &DL, Register DstReg,
3342 ArrayRef<MachineOperand> Cond, Register TrueReg,
3343 Register FalseReg) const {
3344 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3345 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
3346 const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
3347 assert(Cond.size() == 1 && "Invalid Cond array");
3348 unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8,
3349 false /*HasMemoryOperand*/);
3350 BuildMI(MBB, I, DL, get(Opc), DstReg)
3351 .addReg(FalseReg)
3352 .addReg(TrueReg)
3353 .addImm(Cond[0].getImm());
3354 }
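
// For example (a sketch): a 32-bit select on COND_E becomes
//   %dst = CMOV32rr %false, %true, 4 /*COND_E*/, implicit $eflags
// i.e. %dst gets %true when ZF is set and %false otherwise, matching the
// two-address CMOV convention where the first input is the pass-through.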
3355
3356 /// Test if the given register is a physical h register.
3357 static bool isHReg(unsigned Reg) {
3358 return X86::GR8_ABCD_HRegClass.contains(Reg);
3359 }
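
// For reference, GR8_ABCD_H contains exactly AH, BH, CH, and DH: the
// high-byte (bits 15:8) views of the A, B, C, and D registers.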
3360
3361 // Try to copy between VR128/VR64 and GR64 registers.
3362 static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
3363 const X86Subtarget &Subtarget) {
3364 bool HasAVX = Subtarget.hasAVX();
3365 bool HasAVX512 = Subtarget.hasAVX512();
3366
3367 // SrcReg(MaskReg) -> DestReg(GR64)
3368 // SrcReg(MaskReg) -> DestReg(GR32)
3369
3370 // All KMASK RegClasses hold the same k registers, so testing against any one of them suffices.
3371 if (X86::VK16RegClass.contains(SrcReg)) {
3372 if (X86::GR64RegClass.contains(DestReg)) {
3373 assert(Subtarget.hasBWI());
3374 return X86::KMOVQrk;
3375 }
3376 if (X86::GR32RegClass.contains(DestReg))
3377 return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk;
3378 }
3379
3380 // SrcReg(GR64) -> DestReg(MaskReg)
3381 // SrcReg(GR32) -> DestReg(MaskReg)
3382
3383 // All KMASK RegClasses hold the same k registers, so testing against any one of them suffices.
3384 if (X86::VK16RegClass.contains(DestReg)) {
3385 if (X86::GR64RegClass.contains(SrcReg)) {
3386 assert(Subtarget.hasBWI());
3387 return X86::KMOVQkr;
3388 }
3389 if (X86::GR32RegClass.contains(SrcReg))
3390 return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr;
3391 }
3392
3393
3394 // SrcReg(VR128) -> DestReg(GR64)
3395 // SrcReg(VR64) -> DestReg(GR64)
3396 // SrcReg(GR64) -> DestReg(VR128)
3397 // SrcReg(GR64) -> DestReg(VR64)
3398
3399 if (X86::GR64RegClass.contains(DestReg)) {
3400 if (X86::VR128XRegClass.contains(SrcReg))
3401 // Copy from a VR128 register to a GR64 register.
3402 return HasAVX512 ? X86::VMOVPQIto64Zrr :
3403 HasAVX ? X86::VMOVPQIto64rr :
3404 X86::MOVPQIto64rr;
3405 if (X86::VR64RegClass.contains(SrcReg))
3406 // Copy from a VR64 register to a GR64 register.
3407 return X86::MMX_MOVD64from64rr;
3408 } else if (X86::GR64RegClass.contains(SrcReg)) {
3409 // Copy from a GR64 register to a VR128 register.
3410 if (X86::VR128XRegClass.contains(DestReg))
3411 return HasAVX512 ? X86::VMOV64toPQIZrr :
3412 HasAVX ? X86::VMOV64toPQIrr :
3413 X86::MOV64toPQIrr;
3414 // Copy from a GR64 register to a VR64 register.
3415 if (X86::VR64RegClass.contains(DestReg))
3416 return X86::MMX_MOVD64to64rr;
3417 }
3418
3419 // SrcReg(VR128) -> DestReg(GR32)
3420 // SrcReg(GR32) -> DestReg(VR128)
3421
3422 if (X86::GR32RegClass.contains(DestReg) &&
3423 X86::VR128XRegClass.contains(SrcReg))
3424 // Copy from a VR128 register to a GR32 register.
3425 return HasAVX512 ? X86::VMOVPDI2DIZrr :
3426 HasAVX ? X86::VMOVPDI2DIrr :
3427 X86::MOVPDI2DIrr;
3428
3429 if (X86::VR128XRegClass.contains(DestReg) &&
3430 X86::GR32RegClass.contains(SrcReg))
3431 // Copy from a GR32 register to a VR128 register.
3432 return HasAVX512 ? X86::VMOVDI2PDIZrr :
3433 HasAVX ? X86::VMOVDI2PDIrr :
3434 X86::MOVDI2PDIrr;
3435 return 0;
3436 }
3437
3438 void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
3439 MachineBasicBlock::iterator MI,
3440 const DebugLoc &DL, MCRegister DestReg,
3441 MCRegister SrcReg, bool KillSrc) const {
3442 // First deal with the normal symmetric copies.
3443 bool HasAVX = Subtarget.hasAVX();
3444 bool HasVLX = Subtarget.hasVLX();
3445 unsigned Opc = 0;
3446 if (X86::GR64RegClass.contains(DestReg, SrcReg))
3447 Opc = X86::MOV64rr;
3448 else if (X86::GR32RegClass.contains(DestReg, SrcReg))
3449 Opc = X86::MOV32rr;
3450 else if (X86::GR16RegClass.contains(DestReg, SrcReg))
3451 Opc = X86::MOV16rr;
3452 else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
3453 // Copying to or from a physical H register on x86-64 requires a NOREX
3454 // move. Otherwise use a normal move.
3455 if ((isHReg(DestReg) || isHReg(SrcReg)) &&
3456 Subtarget.is64Bit()) {
3457 Opc = X86::MOV8rr_NOREX;
3458 // Both operands must be encodable without a REX prefix.
3459 assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
3460 "8-bit H register cannot be copied outside GR8_NOREX");
3461 } else
3462 Opc = X86::MOV8rr;
3463 }
3464 else if (X86::VR64RegClass.contains(DestReg, SrcReg))
3465 Opc = X86::MMX_MOVQ64rr;
3466 else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
3467 if (HasVLX)
3468 Opc = X86::VMOVAPSZ128rr;
3469 else if (X86::VR128RegClass.contains(DestReg, SrcReg))
3470 Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
3471 else {
3472 // If this is an extended register and we don't have VLX, we need to use a
3473 // 512-bit move.
3474 Opc = X86::VMOVAPSZrr;
3475 const TargetRegisterInfo *TRI = &getRegisterInfo();
3476 DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm,
3477 &X86::VR512RegClass);
3478 SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm,
3479 &X86::VR512RegClass);
3480 }
3481 } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
3482 if (HasVLX)
3483 Opc = X86::VMOVAPSZ256rr;
3484 else if (X86::VR256RegClass.contains(DestReg, SrcReg))
3485 Opc = X86::VMOVAPSYrr;
3486 else {
3487 // If this is an extended register and we don't have VLX, we need to use a
3488 // 512-bit move.
3489 Opc = X86::VMOVAPSZrr;
3490 const TargetRegisterInfo *TRI = &getRegisterInfo();
3491 DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm,
3492 &X86::VR512RegClass);
3493 SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm,
3494 &X86::VR512RegClass);
3495 }
3496 } else if (X86::VR512RegClass.contains(DestReg, SrcReg))
3497 Opc = X86::VMOVAPSZrr;
3498 // All KMASK RegClasses hold the same k registers, so testing against any one of them suffices.
3499 else if (X86::VK16RegClass.contains(DestReg, SrcReg))
3500 Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk;
3501 if (!Opc)
3502 Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
3503
3504 if (Opc) {
3505 BuildMI(MBB, MI, DL, get(Opc), DestReg)
3506 .addReg(SrcReg, getKillRegState(KillSrc));
3507 return;
3508 }
3509
3510 if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
3511 // FIXME: We use a fatal error here because historically LLVM has tried to
3512 // lower some of these physreg copies and we want to ensure we get
3513 // reasonable bug reports if someone encounters a case no other testing
3514 // found. This path should be removed after the LLVM 7 release.
3515 report_fatal_error("Unable to copy EFLAGS physical register!");
3516 }
3517
3518 LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
3519 << RI.getName(DestReg) << '\n');
3520 report_fatal_error("Cannot emit physreg copy instruction");
3521 }
3522
3523 Optional<DestSourcePair>
3524 X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
3525 if (MI.isMoveReg())
3526 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
3527 return None;
3528 }
3529
3530 static unsigned getLoadStoreRegOpcode(unsigned Reg,
3531 const TargetRegisterClass *RC,
3532 bool isStackAligned,
3533 const X86Subtarget &STI,
3534 bool load) {
3535 bool HasAVX = STI.hasAVX();
3536 bool HasAVX512 = STI.hasAVX512();
3537 bool HasVLX = STI.hasVLX();
3538
3539 switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
3540 default:
3541 llvm_unreachable("Unknown spill size");
3542 case 1:
3543 assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
3544 if (STI.is64Bit())
3545 // Copying to or from a physical H register on x86-64 requires a NOREX
3546 // move. Otherwise use a normal move.
3547 if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
3548 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
3549 return load ? X86::MOV8rm : X86::MOV8mr;
3550 case 2:
3551 if (X86::VK16RegClass.hasSubClassEq(RC))
3552 return load ? X86::KMOVWkm : X86::KMOVWmk;
3553 assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
3554 return load ? X86::MOV16rm : X86::MOV16mr;
3555 case 4:
3556 if (X86::GR32RegClass.hasSubClassEq(RC))
3557 return load ? X86::MOV32rm : X86::MOV32mr;
3558 if (X86::FR32XRegClass.hasSubClassEq(RC))
3559 return load ?
3560 (HasAVX512 ? X86::VMOVSSZrm_alt :
3561 HasAVX ? X86::VMOVSSrm_alt :
3562 X86::MOVSSrm_alt) :
3563 (HasAVX512 ? X86::VMOVSSZmr :
3564 HasAVX ? X86::VMOVSSmr :
3565 X86::MOVSSmr);
3566 if (X86::RFP32RegClass.hasSubClassEq(RC))
3567 return load ? X86::LD_Fp32m : X86::ST_Fp32m;
3568 if (X86::VK32RegClass.hasSubClassEq(RC)) {
3569 assert(STI.hasBWI() && "KMOVD requires BWI");
3570 return load ? X86::KMOVDkm : X86::KMOVDmk;
3571 }
3572 // All of these mask pair classes have the same spill size, so the same kind
3573 // of KMOV instructions can be used with all of them.
3574 if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
3575 X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
3576 X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
3577 X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
3578 X86::VK16PAIRRegClass.hasSubClassEq(RC))
3579 return load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
3580 llvm_unreachable("Unknown 4-byte regclass");
3581 case 8:
3582 if (X86::GR64RegClass.hasSubClassEq(RC))
3583 return load ? X86::MOV64rm : X86::MOV64mr;
3584 if (X86::FR64XRegClass.hasSubClassEq(RC))
3585 return load ?
3586 (HasAVX512 ? X86::VMOVSDZrm_alt :
3587 HasAVX ? X86::VMOVSDrm_alt :
3588 X86::MOVSDrm_alt) :
3589 (HasAVX512 ? X86::VMOVSDZmr :
3590 HasAVX ? X86::VMOVSDmr :
3591 X86::MOVSDmr);
3592 if (X86::VR64RegClass.hasSubClassEq(RC))
3593 return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
3594 if (X86::RFP64RegClass.hasSubClassEq(RC))
3595 return load ? X86::LD_Fp64m : X86::ST_Fp64m;
3596 if (X86::VK64RegClass.hasSubClassEq(RC)) {
3597 assert(STI.hasBWI() && "KMOVQ requires BWI");
3598 return load ? X86::KMOVQkm : X86::KMOVQmk;
3599 }
3600 llvm_unreachable("Unknown 8-byte regclass");
3601 case 10:
3602 assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
3603 return load ? X86::LD_Fp80m : X86::ST_FpP80m;
3604 case 16: {
3605 if (X86::VR128XRegClass.hasSubClassEq(RC)) {
3606 // If the stack is realigned, we can use aligned stores.
3607 if (isStackAligned)
3608 return load ?
3609 (HasVLX ? X86::VMOVAPSZ128rm :
3610 HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX :
3611 HasAVX ? X86::VMOVAPSrm :
3612 X86::MOVAPSrm):
3613 (HasVLX ? X86::VMOVAPSZ128mr :
3614 HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX :
3615 HasAVX ? X86::VMOVAPSmr :
3616 X86::MOVAPSmr);
3617 else
3618 return load ?
3619 (HasVLX ? X86::VMOVUPSZ128rm :
3620 HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX :
3621 HasAVX ? X86::VMOVUPSrm :
3622 X86::MOVUPSrm):
3623 (HasVLX ? X86::VMOVUPSZ128mr :
3624 HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX :
3625 HasAVX ? X86::VMOVUPSmr :
3626 X86::MOVUPSmr);
3627 }
3628 if (X86::BNDRRegClass.hasSubClassEq(RC)) {
3629 if (STI.is64Bit())
3630 return load ? X86::BNDMOV64rm : X86::BNDMOV64mr;
3631 else
3632 return load ? X86::BNDMOV32rm : X86::BNDMOV32mr;
3633 }
3634 llvm_unreachable("Unknown 16-byte regclass");
3635 }
3636 case 32:
3637 assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
3638 // If the stack is realigned, we can use aligned stores.
3639 if (isStackAligned)
3640 return load ?
3641 (HasVLX ? X86::VMOVAPSZ256rm :
3642 HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX :
3643 X86::VMOVAPSYrm) :
3644 (HasVLX ? X86::VMOVAPSZ256mr :
3645 HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX :
3646 X86::VMOVAPSYmr);
3647 else
3648 return load ?
3649 (HasVLX ? X86::VMOVUPSZ256rm :
3650 HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX :
3651 X86::VMOVUPSYrm) :
3652 (HasVLX ? X86::VMOVUPSZ256mr :
3653 HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX :
3654 X86::VMOVUPSYmr);
3655 case 64:
3656 assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
3657 assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
3658 if (isStackAligned)
3659 return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
3660 else
3661 return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
3662 }
3663 }
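
// For example (a sketch): storing a VR128X register to an aligned slot picks
//   VMOVAPSZ128mr        with AVX512+VLX,
//   VMOVAPSZ128mr_NOVLX  with AVX512 but no VLX (may need XMM16-31),
//   VMOVAPSmr / MOVAPSmr with plain AVX / SSE,
// and the unaligned VMOVUPS* variants when the slot is not aligned.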
3664
3665 bool X86InstrInfo::getMemOperandsWithOffsetWidth(
3666 const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
3667 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
3668 const TargetRegisterInfo *TRI) const {
3669 const MCInstrDesc &Desc = MemOp.getDesc();
3670 int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
3671 if (MemRefBegin < 0)
3672 return false;
3673
3674 MemRefBegin += X86II::getOperandBias(Desc);
3675
3676 const MachineOperand *BaseOp =
3677 &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
3678 if (!BaseOp->isReg()) // Can be an MO_FrameIndex
3679 return false;
3680
3681 if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
3682 return false;
3683
3684 if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
3685 X86::NoRegister)
3686 return false;
3687
3688 const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
3689
3690 // Displacement can be symbolic
3691 if (!DispMO.isImm())
3692 return false;
3693
3694 Offset = DispMO.getImm();
3695
3696 if (!BaseOp->isReg())
3697 return false;
3698
3699 OffsetIsScalable = false;
3700 // FIXME: Relying on memoperands() may not be the right thing to do here.
3701 // Check with X86 maintainers, and fix it accordingly. For now, it is OK,
3702 // since `Width` is unused in the X86 back-end at the moment.
3703 Width =
3704 !MemOp.memoperands_empty() ? MemOp.memoperands().front()->getSize() : 0;
3705 BaseOps.push_back(BaseOp);
3706 return true;
3707 }
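
// E.g. (a sketch): for a simple load such as
//   %dst = MOV64rm %base, 1, $noreg, 16, $noreg
// this reports BaseOps = {%base} and Offset = 16; forms with a scaled index
// register or a symbolic displacement are rejected above.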
3708
3709 static unsigned getStoreRegOpcode(unsigned SrcReg,
3710 const TargetRegisterClass *RC,
3711 bool isStackAligned,
3712 const X86Subtarget &STI) {
3713 return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false);
3714 }
3715
3716
3717 static unsigned getLoadRegOpcode(unsigned DestReg,
3718 const TargetRegisterClass *RC,
3719 bool isStackAligned,
3720 const X86Subtarget &STI) {
3721 return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true);
3722 }
3723
3724 void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
3725 MachineBasicBlock::iterator MI,
3726 Register SrcReg, bool isKill, int FrameIdx,
3727 const TargetRegisterClass *RC,
3728 const TargetRegisterInfo *TRI) const {
3729 const MachineFunction &MF = *MBB.getParent();
3730 assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
3731 "Stack slot too small for store");
3732 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
3733 bool isAligned =
3734 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
3735 RI.canRealignStack(MF);
3736 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
3737 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
3738 .addReg(SrcReg, getKillRegState(isKill));
3739 }
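
// The emitted spill for a 32-bit GPR, for instance, looks like (a sketch):
//   MOV32mr %stack.FrameIdx, 1, $noreg, 0, $noreg, %src
// with addFrameReference supplying the frame-index addressing operands.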
3740
3741 void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
3742 MachineBasicBlock::iterator MI,
3743 Register DestReg, int FrameIdx,
3744 const TargetRegisterClass *RC,
3745 const TargetRegisterInfo *TRI) const {
3746 const MachineFunction &MF = *MBB.getParent();
3747 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
3748 bool isAligned =
3749 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
3750 RI.canRealignStack(MF);
3751 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
3752 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), FrameIdx);
3753 }
3754
3755 bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
3756 Register &SrcReg2, int &CmpMask,
3757 int &CmpValue) const {
3758 switch (MI.getOpcode()) {
3759 default: break;
3760 case X86::CMP64ri32:
3761 case X86::CMP64ri8:
3762 case X86::CMP32ri:
3763 case X86::CMP32ri8:
3764 case X86::CMP16ri:
3765 case X86::CMP16ri8:
3766 case X86::CMP8ri:
3767 SrcReg = MI.getOperand(0).getReg();
3768 SrcReg2 = 0;
3769 if (MI.getOperand(1).isImm()) {
3770 CmpMask = ~0;
3771 CmpValue = MI.getOperand(1).getImm();
3772 } else {
3773 CmpMask = CmpValue = 0;
3774 }
3775 return true;
3776 // A SUB can be used to perform a comparison.
3777 case X86::SUB64rm:
3778 case X86::SUB32rm:
3779 case X86::SUB16rm:
3780 case X86::SUB8rm:
3781 SrcReg = MI.getOperand(1).getReg();
3782 SrcReg2 = 0;
3783 CmpMask = 0;
3784 CmpValue = 0;
3785 return true;
3786 case X86::SUB64rr:
3787 case X86::SUB32rr:
3788 case X86::SUB16rr:
3789 case X86::SUB8rr:
3790 SrcReg = MI.getOperand(1).getReg();
3791 SrcReg2 = MI.getOperand(2).getReg();
3792 CmpMask = 0;
3793 CmpValue = 0;
3794 return true;
3795 case X86::SUB64ri32:
3796 case X86::SUB64ri8:
3797 case X86::SUB32ri:
3798 case X86::SUB32ri8:
3799 case X86::SUB16ri:
3800 case X86::SUB16ri8:
3801 case X86::SUB8ri:
3802 SrcReg = MI.getOperand(1).getReg();
3803 SrcReg2 = 0;
3804 if (MI.getOperand(2).isImm()) {
3805 CmpMask = ~0;
3806 CmpValue = MI.getOperand(2).getImm();
3807 } else {
3808 CmpMask = CmpValue = 0;
3809 }
3810 return true;
3811 case X86::CMP64rr:
3812 case X86::CMP32rr:
3813 case X86::CMP16rr:
3814 case X86::CMP8rr:
3815 SrcReg = MI.getOperand(0).getReg();
3816 SrcReg2 = MI.getOperand(1).getReg();
3817 CmpMask = 0;
3818 CmpValue = 0;
3819 return true;
3820 case X86::TEST8rr:
3821 case X86::TEST16rr:
3822 case X86::TEST32rr:
3823 case X86::TEST64rr:
3824 SrcReg = MI.getOperand(0).getReg();
3825 if (MI.getOperand(1).getReg() != SrcReg)
3826 return false;
3827 // Compare against zero.
3828 SrcReg2 = 0;
3829 CmpMask = ~0;
3830 CmpValue = 0;
3831 return true;
3832 }
3833 return false;
3834 }
3835
3836 /// Check whether the first instruction, whose only
3837 /// purpose is to update flags, can be made redundant.
3838 /// CMPrr can be made redundant by SUBrr if the operands are the same.
3839 /// This function can be extended later on.
3840 /// SrcReg, SrcReg2: register operands for FlagI.
3841 /// ImmValue: immediate for FlagI if it takes an immediate.
3842 inline static bool isRedundantFlagInstr(const MachineInstr &FlagI,
3843 Register SrcReg, Register SrcReg2,
3844 int ImmMask, int ImmValue,
3845 const MachineInstr &OI) {
3846 if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
3847 (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
3848 (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
3849 (FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) &&
3850 ((OI.getOperand(1).getReg() == SrcReg &&
3851 OI.getOperand(2).getReg() == SrcReg2) ||
3852 (OI.getOperand(1).getReg() == SrcReg2 &&
3853 OI.getOperand(2).getReg() == SrcReg)))
3854 return true;
3855
3856 if (ImmMask != 0 &&
3857 ((FlagI.getOpcode() == X86::CMP64ri32 &&
3858 OI.getOpcode() == X86::SUB64ri32) ||
3859 (FlagI.getOpcode() == X86::CMP64ri8 &&
3860 OI.getOpcode() == X86::SUB64ri8) ||
3861 (FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) ||
3862 (FlagI.getOpcode() == X86::CMP32ri8 &&
3863 OI.getOpcode() == X86::SUB32ri8) ||
3864 (FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) ||
3865 (FlagI.getOpcode() == X86::CMP16ri8 &&
3866 OI.getOpcode() == X86::SUB16ri8) ||
3867 (FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) &&
3868 OI.getOperand(1).getReg() == SrcReg &&
3869 OI.getOperand(2).getImm() == ImmValue)
3870 return true;
3871 return false;
3872 }
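
// Example (a sketch): given
//   %d = SUB32rr %a, %b     ; sets EFLAGS exactly like the compare
//   CMP32rr %a, %b          ; FlagI
// the CMP is redundant. The operands may also appear swapped in the SUB,
// in which case the caller must swap the condition codes of EFLAGS users.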
3873
3874 /// Check whether the definition can be converted
3875 /// to remove a comparison against zero.
3876 inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag) {
3877 NoSignFlag = false;
3878
3879 switch (MI.getOpcode()) {
3880 default: return false;
3881
3882 // The shift instructions only modify ZF if their shift count is non-zero.
3883 // N.B.: The processor truncates the shift count depending on the encoding.
3884 case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri:
3885 case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri:
3886 return getTruncatedShiftCount(MI, 2) != 0;
3887
3888 // Some left shift instructions can be turned into LEA instructions but only
3889 // if their flags aren't used. Avoid transforming such instructions.
3890 case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{
3891 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
3892 if (isTruncatedShiftCountForLEA(ShAmt)) return false;
3893 return ShAmt != 0;
3894 }
3895
3896 case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8:
3897 case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8:
3898 return getTruncatedShiftCount(MI, 3) != 0;
3899
3900 case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
3901 case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8:
3902 case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr:
3903 case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm:
3904 case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm:
3905 case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r:
3906 case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
3907 case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8:
3908 case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr:
3909 case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm:
3910 case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm:
3911 case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r:
3912 case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
3913 case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8:
3914 case X86::AND8ri: case X86::AND64rr: case X86::AND32rr:
3915 case X86::AND16rr: case X86::AND8rr: case X86::AND64rm:
3916 case X86::AND32rm: case X86::AND16rm: case X86::AND8rm:
3917 case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri:
3918 case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8:
3919 case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr:
3920 case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm:
3921 case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm:
3922 case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri:
3923 case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8:
3924 case X86::OR8ri: case X86::OR64rr: case X86::OR32rr:
3925 case X86::OR16rr: case X86::OR8rr: case X86::OR64rm:
3926 case X86::OR32rm: case X86::OR16rm: case X86::OR8rm:
3927 case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri:
3928 case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8:
3929 case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr:
3930 case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm:
3931 case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm:
3932 case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri:
3933 case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8:
3934 case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr:
3935 case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm:
3936 case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm:
3937 case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
3938 case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1:
3939 case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1:
3940 case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1:
3941 case X86::ANDN32rr: case X86::ANDN32rm:
3942 case X86::ANDN64rr: case X86::ANDN64rm:
3943 case X86::BLSI32rr: case X86::BLSI32rm:
3944 case X86::BLSI64rr: case X86::BLSI64rm:
3945 case X86::BLSMSK32rr:case X86::BLSMSK32rm:
3946 case X86::BLSMSK64rr:case X86::BLSMSK64rm:
3947 case X86::BLSR32rr: case X86::BLSR32rm:
3948 case X86::BLSR64rr: case X86::BLSR64rm:
3949 case X86::BZHI32rr: case X86::BZHI32rm:
3950 case X86::BZHI64rr: case X86::BZHI64rm:
3951 case X86::LZCNT16rr: case X86::LZCNT16rm:
3952 case X86::LZCNT32rr: case X86::LZCNT32rm:
3953 case X86::LZCNT64rr: case X86::LZCNT64rm:
3954 case X86::POPCNT16rr:case X86::POPCNT16rm:
3955 case X86::POPCNT32rr:case X86::POPCNT32rm:
3956 case X86::POPCNT64rr:case X86::POPCNT64rm:
3957 case X86::TZCNT16rr: case X86::TZCNT16rm:
3958 case X86::TZCNT32rr: case X86::TZCNT32rm:
3959 case X86::TZCNT64rr: case X86::TZCNT64rm:
3960 case X86::BLCFILL32rr: case X86::BLCFILL32rm:
3961 case X86::BLCFILL64rr: case X86::BLCFILL64rm:
3962 case X86::BLCI32rr: case X86::BLCI32rm:
3963 case X86::BLCI64rr: case X86::BLCI64rm:
3964 case X86::BLCIC32rr: case X86::BLCIC32rm:
3965 case X86::BLCIC64rr: case X86::BLCIC64rm:
3966 case X86::BLCMSK32rr: case X86::BLCMSK32rm:
3967 case X86::BLCMSK64rr: case X86::BLCMSK64rm:
3968 case X86::BLCS32rr: case X86::BLCS32rm:
3969 case X86::BLCS64rr: case X86::BLCS64rm:
3970 case X86::BLSFILL32rr: case X86::BLSFILL32rm:
3971 case X86::BLSFILL64rr: case X86::BLSFILL64rm:
3972 case X86::BLSIC32rr: case X86::BLSIC32rm:
3973 case X86::BLSIC64rr: case X86::BLSIC64rm:
3974 case X86::T1MSKC32rr: case X86::T1MSKC32rm:
3975 case X86::T1MSKC64rr: case X86::T1MSKC64rm:
3976 case X86::TZMSK32rr: case X86::TZMSK32rm:
3977 case X86::TZMSK64rr: case X86::TZMSK64rm:
3978 return true;
3979 case X86::BEXTR32rr: case X86::BEXTR64rr:
3980 case X86::BEXTR32rm: case X86::BEXTR64rm:
3981 case X86::BEXTRI32ri: case X86::BEXTRI32mi:
3982 case X86::BEXTRI64ri: case X86::BEXTRI64mi:
3983 // BEXTR doesn't update the sign flag so we can't use it.
3984 NoSignFlag = true;
3985 return true;
3986 }
3987 }
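
// E.g. (a sketch): in
//   %x = AND32rr %a, %b
//   TEST32rr %x, %x
//   JCC_1 %bb, 4 /*COND_E*/
// the TEST is removable because AND32rr already set ZF (and SF) according
// to its result, which is exactly what the compare against zero computes.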
3988
3989 /// Check whether the use can be converted to remove a comparison against zero.
3990 static X86::CondCode isUseDefConvertible(const MachineInstr &MI) {
3991 switch (MI.getOpcode()) {
3992 default: return X86::COND_INVALID;
3993 case X86::NEG8r:
3994 case X86::NEG16r:
3995 case X86::NEG32r:
3996 case X86::NEG64r:
3997 return X86::COND_AE;
3998 case X86::LZCNT16rr:
3999 case X86::LZCNT32rr:
4000 case X86::LZCNT64rr:
4001 return X86::COND_B;
4002 case X86::POPCNT16rr:
4003 case X86::POPCNT32rr:
4004 case X86::POPCNT64rr:
4005 return X86::COND_E;
4006 case X86::TZCNT16rr:
4007 case X86::TZCNT32rr:
4008 case X86::TZCNT64rr:
4009 return X86::COND_B;
4010 case X86::BSF16rr:
4011 case X86::BSF32rr:
4012 case X86::BSF64rr:
4013 case X86::BSR16rr:
4014 case X86::BSR32rr:
4015 case X86::BSR64rr:
4016 return X86::COND_E;
4017 case X86::BLSI32rr:
4018 case X86::BLSI64rr:
4019 return X86::COND_AE;
4020 case X86::BLSR32rr:
4021 case X86::BLSR64rr:
4022 case X86::BLSMSK32rr:
4023 case X86::BLSMSK64rr:
4024 return X86::COND_B;
4025 // TODO: TBM instructions.
4026 }
4027 }
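
// E.g. (a sketch): POPCNT sets ZF precisely when its source is zero, so
//   %y = POPCNT32rr %x
//   TEST32rr %x, %x
//   JCC_1 %bb, 4 /*COND_E*/
// can branch on the POPCNT's own flags instead, using the returned COND_E.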
4028
4029 /// Check if there exists an earlier instruction that
4030 /// operates on the same source operands and sets flags in the same way as
4031 /// Compare; remove Compare if possible.
4032 bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
4033 Register SrcReg2, int CmpMask,
4034 int CmpValue,
4035 const MachineRegisterInfo *MRI) const {
4036 // Check whether we can replace SUB with CMP.
4037 switch (CmpInstr.getOpcode()) {
4038 default: break;
4039 case X86::SUB64ri32:
4040 case X86::SUB64ri8:
4041 case X86::SUB32ri:
4042 case X86::SUB32ri8:
4043 case X86::SUB16ri:
4044 case X86::SUB16ri8:
4045 case X86::SUB8ri:
4046 case X86::SUB64rm:
4047 case X86::SUB32rm:
4048 case X86::SUB16rm:
4049 case X86::SUB8rm:
4050 case X86::SUB64rr:
4051 case X86::SUB32rr:
4052 case X86::SUB16rr:
4053 case X86::SUB8rr: {
4054 if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
4055 return false;
4056 // There is no use of the destination register, so we can replace SUB with CMP.
4057 unsigned NewOpcode = 0;
4058 switch (CmpInstr.getOpcode()) {
4059 default: llvm_unreachable("Unreachable!");
4060 case X86::SUB64rm: NewOpcode = X86::CMP64rm; break;
4061 case X86::SUB32rm: NewOpcode = X86::CMP32rm; break;
4062 case X86::SUB16rm: NewOpcode = X86::CMP16rm; break;
4063 case X86::SUB8rm: NewOpcode = X86::CMP8rm; break;
4064 case X86::SUB64rr: NewOpcode = X86::CMP64rr; break;
4065 case X86::SUB32rr: NewOpcode = X86::CMP32rr; break;
4066 case X86::SUB16rr: NewOpcode = X86::CMP16rr; break;
4067 case X86::SUB8rr: NewOpcode = X86::CMP8rr; break;
4068 case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
4069 case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break;
4070 case X86::SUB32ri: NewOpcode = X86::CMP32ri; break;
4071 case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break;
4072 case X86::SUB16ri: NewOpcode = X86::CMP16ri; break;
4073 case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break;
4074 case X86::SUB8ri: NewOpcode = X86::CMP8ri; break;
4075 }
4076 CmpInstr.setDesc(get(NewOpcode));
4077 CmpInstr.RemoveOperand(0);
4078 // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
4079 if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
4080 NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
4081 return false;
4082 }
4083 }
4084
4085 // Get the unique definition of SrcReg.
4086 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
4087 if (!MI) return false;
4088
4089 // CmpInstr is the first instruction of the BB.
4090 MachineBasicBlock::iterator I = CmpInstr, Def = MI;
4091
4092 // If we are comparing against zero, check whether we can use MI to update
4093 // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
4094 bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
4095 if (IsCmpZero && MI->getParent() != CmpInstr.getParent())
4096 return false;
4097
4098 // If we have a use of the source register between the def and our compare
4099 // instruction we can eliminate the compare iff the use sets EFLAGS in the
4100 // right way.
4101 bool ShouldUpdateCC = false;
4102 bool NoSignFlag = false;
4103 X86::CondCode NewCC = X86::COND_INVALID;
4104 if (IsCmpZero && !isDefConvertible(*MI, NoSignFlag)) {
4105 // Scan forward from the def until we hit the use we're looking for or the
4106 // compare instruction.
4107 for (MachineBasicBlock::iterator J = MI;; ++J) {
4108 // Do we have a convertible instruction?
4109 NewCC = isUseDefConvertible(*J);
4110 if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
4111 J->getOperand(1).getReg() == SrcReg) {
4112 assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
4113 ShouldUpdateCC = true; // Update CC later on.
4114 // This is not a def of SrcReg, but still a def of EFLAGS. Keep going
4115 // with the new def.
4116 Def = J;
4117 MI = &*Def;
4118 break;
4119 }
4120
4121 if (J == I)
4122 return false;
4123 }
4124 }
4125
4126 // We are searching for an earlier instruction that can make CmpInstr
4127 // redundant and that instruction will be saved in Sub.
4128 MachineInstr *Sub = nullptr;
4129 const TargetRegisterInfo *TRI = &getRegisterInfo();
4130
4131 // We iterate backward, starting from the instruction before CmpInstr, and
4132 // stop when reaching the definition of a source register or when done with the BB.
4133 // RI points to the instruction before CmpInstr.
4134 // If the definition is in this basic block, RE points to the definition;
4135 // otherwise, RE is the rend of the basic block.
4136 MachineBasicBlock::reverse_iterator
4137 RI = ++I.getReverse(),
4138 RE = CmpInstr.getParent() == MI->getParent()
4139 ? Def.getReverse() /* points to MI */
4140 : CmpInstr.getParent()->rend();
4141 MachineInstr *Movr0Inst = nullptr;
4142 for (; RI != RE; ++RI) {
4143 MachineInstr &Instr = *RI;
4144 // Check whether CmpInstr can be made redundant by the current instruction.
4145 if (!IsCmpZero && isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask,
4146 CmpValue, Instr)) {
4147 Sub = &Instr;
4148 break;
4149 }
4150
4151 if (Instr.modifiesRegister(X86::EFLAGS, TRI) ||
4152 Instr.readsRegister(X86::EFLAGS, TRI)) {
4153 // This instruction modifies or uses EFLAGS.
4154
4155 // MOV32r0 etc. are implemented with xor, which clobbers the condition codes.
4156 // They are safe to move up if their definition of EFLAGS is dead and
4157 // earlier instructions do not read or write EFLAGS.
4158 if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 &&
4159 Instr.registerDefIsDead(X86::EFLAGS, TRI)) {
4160 Movr0Inst = &Instr;
4161 continue;
4162 }
4163
4164 // We can't remove CmpInstr.
4165 return false;
4166 }
4167 }
4168
4169 // Return false if no candidates exist.
4170 if (!IsCmpZero && !Sub)
4171 return false;
4172
4173 bool IsSwapped =
4174 (SrcReg2 != 0 && Sub && Sub->getOperand(1).getReg() == SrcReg2 &&
4175 Sub->getOperand(2).getReg() == SrcReg);
4176
4177 // Scan forward from the instruction after CmpInstr for uses of EFLAGS.
4178 // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
4179 // If we are done with the basic block, we need to check whether EFLAGS is
4180 // live-out.
4181 bool IsSafe = false;
4182 SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate;
4183 MachineBasicBlock::iterator E = CmpInstr.getParent()->end();
4184 for (++I; I != E; ++I) {
4185 const MachineInstr &Instr = *I;
4186 bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
4187 bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
4188 // We need to check the usage when this instruction both uses and updates EFLAGS.
4189 if (!UseEFLAGS && ModifyEFLAGS) {
4190 // It is safe to remove CmpInstr if EFLAGS is updated again.
4191 IsSafe = true;
4192 break;
4193 }
4194 if (!UseEFLAGS && !ModifyEFLAGS)
4195 continue;
4196
4197 // EFLAGS is used by this instruction.
4198 X86::CondCode OldCC = X86::COND_INVALID;
4199 if (IsCmpZero || IsSwapped) {
4200 // We decode the condition code from opcode.
4201 if (Instr.isBranch())
4202 OldCC = X86::getCondFromBranch(Instr);
4203 else {
4204 OldCC = X86::getCondFromSETCC(Instr);
4205 if (OldCC == X86::COND_INVALID)
4206 OldCC = X86::getCondFromCMov(Instr);
4207 }
4208 if (OldCC == X86::COND_INVALID) return false;
4209 }
4210 X86::CondCode ReplacementCC = X86::COND_INVALID;
4211 if (IsCmpZero) {
4212 switch (OldCC) {
4213 default: break;
4214 case X86::COND_A: case X86::COND_AE:
4215 case X86::COND_B: case X86::COND_BE:
4216 case X86::COND_G: case X86::COND_GE:
4217 case X86::COND_L: case X86::COND_LE:
4218 case X86::COND_O: case X86::COND_NO:
4219 // CF and OF are used, we can't perform this optimization.
4220 return false;
4221 case X86::COND_S: case X86::COND_NS:
4222 // If SF is used, but the instruction doesn't update the SF, then we
4223 // can't do the optimization.
4224 if (NoSignFlag)
4225 return false;
4226 break;
4227 }
4228
4229 // If we're updating the condition code, check whether we have to reverse
4230 // the condition.
4231 if (ShouldUpdateCC)
4232 switch (OldCC) {
4233 default:
4234 return false;
4235 case X86::COND_E:
4236 ReplacementCC = NewCC;
4237 break;
4238 case X86::COND_NE:
4239 ReplacementCC = GetOppositeBranchCondition(NewCC);
4240 break;
4241 }
4242 } else if (IsSwapped) {
4243 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
4244 // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
4245 // We swap the condition code and synthesize the new opcode.
4246 ReplacementCC = getSwappedCondition(OldCC);
4247 if (ReplacementCC == X86::COND_INVALID) return false;
4248 }
4249
4250 if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) {
4251 // Push the MachineInstr to OpsToUpdate.
4252 // If it is safe to remove CmpInstr, the condition code of these
4253 // instructions will be modified.
4254 OpsToUpdate.push_back(std::make_pair(&*I, ReplacementCC));
4255 }
4256 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
4257 // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
4258 IsSafe = true;
4259 break;
4260 }
4261 }
4262
4263 // If EFLAGS is neither killed nor re-defined, we should check whether it is
4264 // live-out. If it is live-out, do not optimize.
4265 if ((IsCmpZero || IsSwapped) && !IsSafe) {
4266 MachineBasicBlock *MBB = CmpInstr.getParent();
4267 for (MachineBasicBlock *Successor : MBB->successors())
4268 if (Successor->isLiveIn(X86::EFLAGS))
4269 return false;
4270 }
4271
4272 // The instruction to be updated is either Sub or MI.
4273 Sub = IsCmpZero ? MI : Sub;
4274 // Move Movr0Inst to the appropriate place before Sub.
4275 if (Movr0Inst) {
4276 // Look backwards until we find a def that doesn't use the current EFLAGS.
4277 Def = Sub;
4278 MachineBasicBlock::reverse_iterator InsertI = Def.getReverse(),
4279 InsertE = Sub->getParent()->rend();
4280 for (; InsertI != InsertE; ++InsertI) {
4281 MachineInstr *Instr = &*InsertI;
4282 if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
4283 Instr->modifiesRegister(X86::EFLAGS, TRI)) {
4284 Sub->getParent()->remove(Movr0Inst);
4285 Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
4286 Movr0Inst);
4287 break;
4288 }
4289 }
4290 if (InsertI == InsertE)
4291 return false;
4292 }
4293
4294 // Make sure Sub instruction defines EFLAGS and mark the def live.
4295 MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS);
4296 assert(FlagDef && "Unable to locate a def EFLAGS operand");
4297 FlagDef->setIsDead(false);
4298
4299 CmpInstr.eraseFromParent();
4300
4301 // Modify the condition code of instructions in OpsToUpdate.
4302 for (auto &Op : OpsToUpdate) {
4303 Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
4304 .setImm(Op.second);
4305 }
4306 return true;
4307 }
4308
4309 /// Try to remove the load by folding it to a register
4310 /// operand at the use. We fold the load if it defines a virtual register,
4311 /// the virtual register is used exactly once in the same BB, and the
4312 /// instructions in between do not load or store and have no side effects.
4313 MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
4314 const MachineRegisterInfo *MRI,
4315 unsigned &FoldAsLoadDefReg,
4316 MachineInstr *&DefMI) const {
4317 // Check whether we can move DefMI here.
4318 DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
4319 assert(DefMI);
4320 bool SawStore = false;
4321 if (!DefMI->isSafeToMove(nullptr, SawStore))
4322 return nullptr;
4323
4324 // Collect information about virtual register operands of MI.
4325 SmallVector<unsigned, 1> SrcOperandIds;
4326 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
4327 MachineOperand &MO = MI.getOperand(i);
4328 if (!MO.isReg())
4329 continue;
4330 Register Reg = MO.getReg();
4331 if (Reg != FoldAsLoadDefReg)
4332 continue;
4333 // Do not fold if we have a subreg use or a def.
4334 if (MO.getSubReg() || MO.isDef())
4335 return nullptr;
4336 SrcOperandIds.push_back(i);
4337 }
4338 if (SrcOperandIds.empty())
4339 return nullptr;
4340
4341 // Check whether we can fold the def into the uses in SrcOperandIds.
4342 if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
4343 FoldAsLoadDefReg = 0;
4344 return FoldMI;
4345 }
4346
4347 return nullptr;
4348 }
4349
4350 /// Expand a single-def pseudo instruction to a two-addr
4351 /// instruction with two undef reads of the register being defined.
4352 /// This is used for mapping:
4353 /// %xmm4 = V_SET0
4354 /// to:
4355 /// %xmm4 = PXORrr undef %xmm4, undef %xmm4
4356 ///
4357 static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
4358 const MCInstrDesc &Desc) {
4359 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
4360 Register Reg = MIB.getReg(0);
4361 MIB->setDesc(Desc);
4362
4363 // MachineInstr::addOperand() will insert explicit operands before any
4364 // implicit operands.
4365 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
4366 // But we don't trust that.
4367 assert(MIB.getReg(1) == Reg &&
4368 MIB.getReg(2) == Reg && "Misplaced operand");
4369 return true;
4370 }
4371
4372 /// Expand a single-def pseudo instruction to a two-addr
4373 /// instruction with two %k0 reads.
4374 /// This is used for mapping:
4375 /// %k4 = K_SET1
4376 /// to:
4377 /// %k4 = KXNORrr %k0, %k0
4378 static bool Expand2AddrKreg(MachineInstrBuilder &MIB,
4379 const MCInstrDesc &Desc, unsigned Reg) {
4380 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
4381 MIB->setDesc(Desc);
4382 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
4383 return true;
4384 }
4385
4386 static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
4387 bool MinusOne) {
4388 MachineBasicBlock &MBB = *MIB->getParent();
4389 DebugLoc DL = MIB->getDebugLoc();
4390 Register Reg = MIB.getReg(0);
4391
4392 // Insert the XOR.
4393 BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg)
4394 .addReg(Reg, RegState::Undef)
4395 .addReg(Reg, RegState::Undef);
4396
4397 // Turn the pseudo into an INC or DEC.
4398 MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
4399 MIB.addReg(Reg);
4400
4401 return true;
4402 }
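
// The resulting sequence for MOV32r1 %r is (a sketch):
//   %r = XOR32rr undef %r, undef %r
//   %r = INC32r %r
// with DEC32r substituted for INC32r when materializing -1 (MOV32r_1).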
4403
4404 static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
4405 const TargetInstrInfo &TII,
4406 const X86Subtarget &Subtarget) {
4407 MachineBasicBlock &MBB = *MIB->getParent();
4408 DebugLoc DL = MIB->getDebugLoc();
4409 int64_t Imm = MIB->getOperand(1).getImm();
4410 assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
4411 MachineBasicBlock::iterator I = MIB.getInstr();
4412
4413 int StackAdjustment;
4414
4415 if (Subtarget.is64Bit()) {
4416 assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
4417 MIB->getOpcode() == X86::MOV32ImmSExti8);
4418
4419 // Can't use push/pop lowering if the function might write to the red zone.
4420 X86MachineFunctionInfo *X86FI =
4421 MBB.getParent()->getInfo<X86MachineFunctionInfo>();
4422 if (X86FI->getUsesRedZone()) {
4423 MIB->setDesc(TII.get(MIB->getOpcode() ==
4424 X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri));
4425 return true;
4426 }
4427
4428 // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
4429 // widen the register if necessary.
4430 StackAdjustment = 8;
4431 BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm);
4432 MIB->setDesc(TII.get(X86::POP64r));
4433 MIB->getOperand(0)
4434 .setReg(getX86SubSuperRegister(MIB.getReg(0), 64));
4435 } else {
4436 assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
4437 StackAdjustment = 4;
4438 BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm);
4439 MIB->setDesc(TII.get(X86::POP32r));
4440 }
4441 MIB->RemoveOperand(1);
4442 MIB->addImplicitDefUseOperands(*MBB.getParent());
4443
4444 // Build CFI if necessary.
4445 MachineFunction &MF = *MBB.getParent();
4446 const X86FrameLowering *TFL = Subtarget.getFrameLowering();
4447 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
4448 bool NeedsDwarfCFI = !IsWin64Prologue && MF.needsFrameMoves();
4449 bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
4450 if (EmitCFI) {
4451 TFL->BuildCFI(MBB, I, DL,
4452 MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
4453 TFL->BuildCFI(MBB, std::next(I), DL,
4454 MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
4455 }
4456
4457 return true;
4458 }
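
// On 64-bit targets that don't use the red zone, MOV64ImmSExti8 %r, imm
// expands to (a sketch):
//   PUSH64i8 imm
//   %r64 = POP64r
// bracketed by adjust_cfa_offset +8/-8 CFI directives when DWARF CFI is
// required and no frame pointer is available.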
4459
4460 // LoadStackGuard has so far only been implemented for 64-bit MachO. A
4461 // different code sequence is needed for other targets.
4462 static void expandLoadStackGuard(MachineInstrBuilder &MIB,
4463 const TargetInstrInfo &TII) {
4464 MachineBasicBlock &MBB = *MIB->getParent();
4465 DebugLoc DL = MIB->getDebugLoc();
4466 Register Reg = MIB.getReg(0);
4467 const GlobalValue *GV =
4468 cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
4469 auto Flags = MachineMemOperand::MOLoad |
4470 MachineMemOperand::MODereferenceable |
4471 MachineMemOperand::MOInvariant;
4472 MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
4473 MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, Align(8));
4474 MachineBasicBlock::iterator I = MIB.getInstr();
4475
4476 BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
4477 .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
4478 .addMemOperand(MMO);
4479 MIB->setDebugLoc(DL);
4480 MIB->setDesc(TII.get(X86::MOV64rm));
4481 MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
4482 }
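
// The expansion is the usual two-load guard sequence (a sketch; the global,
// e.g. __stack_chk_guard, comes from the pseudo's memory operand):
//   %r = MOV64rm $rip, 1, $noreg, @guard@GOTPCREL, $noreg
//   %r = MOV64rm killed %r, 1, $noreg, 0, $noreg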
4483
4484 static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
4485 MachineBasicBlock &MBB = *MIB->getParent();
4486 MachineFunction &MF = *MBB.getParent();
4487 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
4488 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4489 unsigned XorOp =
4490 MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
4491 MIB->setDesc(TII.get(XorOp));
4492 MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
4493 return true;
4494 }
4495
4496 // This is used to handle spills for 128/256-bit registers when we have AVX512,
4497 // but not VLX. If it uses an extended register, we need to use an instruction
4498 // that loads the lower 128/256 bits but is available with only AVX512F.
4499 static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
4500 const TargetRegisterInfo *TRI,
4501 const MCInstrDesc &LoadDesc,
4502 const MCInstrDesc &BroadcastDesc,
4503 unsigned SubIdx) {
4504 Register DestReg = MIB.getReg(0);
4505 // Check if DestReg is XMM16-31 or YMM16-31.
4506 if (TRI->getEncodingValue(DestReg) < 16) {
4507 // We can use a normal VEX encoded load.
4508 MIB->setDesc(LoadDesc);
4509 } else {
4510 // Use a 128/256-bit VBROADCAST instruction.
4511 MIB->setDesc(BroadcastDesc);
4512 // Change the destination to a 512-bit register.
4513 DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
4514 MIB->getOperand(0).setReg(DestReg);
4515 }
4516 return true;
4517 }
4518
4519 // This is used to handle spills for 128/256-bit registers when we have AVX512,
4520 // but not VLX. If it uses an extended register, we need to use an instruction
4521 // that stores the lower 128/256 bits but is available with only AVX512F.
4522 static bool expandNOVLXStore(MachineInstrBuilder &MIB,
4523 const TargetRegisterInfo *TRI,
4524 const MCInstrDesc &StoreDesc,
4525 const MCInstrDesc &ExtractDesc,
4526 unsigned SubIdx) {
4527 Register SrcReg = MIB.getReg(X86::AddrNumOperands);
4528 // Check if SrcReg is XMM16-31 or YMM16-31.
4529 if (TRI->getEncodingValue(SrcReg) < 16) {
4530 // We can use a normal VEX encoded store.
4531 MIB->setDesc(StoreDesc);
4532 } else {
4533 // Use a VEXTRACTF instruction.
4534 MIB->setDesc(ExtractDesc);
4535 // Change the source to a 512-bit register.
4536 SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
4537 MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
4538 MIB.addImm(0x0); // Append immediate to extract from the lower bits.
4539 }
4540
4541 return true;
4542 }
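
// E.g. (a sketch): a 128-bit store of %xmm17 without VLX cannot use a plain
// VEX store, so the pseudo is rewritten with the supplied extract opcode (a
// VEXTRACTF32x4Zmr-style instruction) that writes the low 128 bits of the
// covering zmm register:
//   VEXTRACT...Zmr <mem>, %zmm17, 0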
4543
4544 static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) {
4545 MIB->setDesc(Desc);
4546 int64_t ShiftAmt = MIB->getOperand(2).getImm();
4547 // Temporarily remove the immediate so we can add another source register.
4548 MIB->RemoveOperand(2);
4549 // Add the register. Don't copy the kill flag if there is one.
4550 MIB.addReg(MIB.getReg(1),
4551 getUndefRegState(MIB->getOperand(1).isUndef()));
4552 // Add back the immediate.
4553 MIB.addImm(ShiftAmt);
4554 return true;
4555 }
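// The rotate pseudos expand to a double shift that uses the same register for
// both sources, e.g. a left-rotate by 5, roughly:
//   $eax = SHLDROT32ri $eax, 5
// becomes:
//   $eax = SHLD32rri8 $eax, $eax, 5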
4556
4557 bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
4558 bool HasAVX = Subtarget.hasAVX();
4559 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
4560 switch (MI.getOpcode()) {
4561 case X86::MOV32r0:
4562 return Expand2AddrUndef(MIB, get(X86::XOR32rr));
4563 case X86::MOV32r1:
4564 return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
4565 case X86::MOV32r_1:
4566 return expandMOV32r1(MIB, *this, /*MinusOne=*/ true);
4567 case X86::MOV32ImmSExti8:
4568 case X86::MOV64ImmSExti8:
4569 return ExpandMOVImmSExti8(MIB, *this, Subtarget);
4570 case X86::SETB_C32r:
4571 return Expand2AddrUndef(MIB, get(X86::SBB32rr));
4572 case X86::SETB_C64r:
4573 return Expand2AddrUndef(MIB, get(X86::SBB64rr));
4574 case X86::MMX_SET0:
4575 return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr));
4576 case X86::V_SET0:
4577 case X86::FsFLD0SS:
4578 case X86::FsFLD0SD:
4579 case X86::FsFLD0F128:
4580 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
4581 case X86::AVX_SET0: {
4582 assert(HasAVX && "AVX not supported");
4583 const TargetRegisterInfo *TRI = &getRegisterInfo();
4584 Register SrcReg = MIB.getReg(0);
4585 Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
4586 MIB->getOperand(0).setReg(XReg);
4587 Expand2AddrUndef(MIB, get(X86::VXORPSrr));
4588 MIB.addReg(SrcReg, RegState::ImplicitDefine);
4589 return true;
4590 }
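// For example, roughly:
//   $ymm0 = AVX_SET0
// becomes a 128-bit xor that implicitly defines the full ymm register:
//   $xmm0 = VXORPSrr undef $xmm0, undef $xmm0, implicit-def $ymm0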
4591 case X86::AVX512_128_SET0:
4592 case X86::AVX512_FsFLD0SS:
4593 case X86::AVX512_FsFLD0SD:
4594 case X86::AVX512_FsFLD0F128: {
4595 bool HasVLX = Subtarget.hasVLX();
4596 Register SrcReg = MIB.getReg(0);
4597 const TargetRegisterInfo *TRI = &getRegisterInfo();
4598 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16)
4599 return Expand2AddrUndef(MIB,
4600 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
4601 // Extended register without VLX. Use a larger XOR.
4602 SrcReg =
4603 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
4604 MIB->getOperand(0).setReg(SrcReg);
4605 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
4606 }
4607 case X86::AVX512_256_SET0:
4608 case X86::AVX512_512_SET0: {
4609 bool HasVLX = Subtarget.hasVLX();
4610 Register SrcReg = MIB.getReg(0);
4611 const TargetRegisterInfo *TRI = &getRegisterInfo();
4612 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) {
4613 Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
4614 MIB->getOperand(0).setReg(XReg);
4615 Expand2AddrUndef(MIB,
4616 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
4617 MIB.addReg(SrcReg, RegState::ImplicitDefine);
4618 return true;
4619 }
4620 if (MI.getOpcode() == X86::AVX512_256_SET0) {
4621 // No VLX so we must reference a zmm.
4622 unsigned ZReg =
4623 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
4624 MIB->getOperand(0).setReg(ZReg);
4625 }
4626 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
4627 }
4628 case X86::V_SETALLONES:
4629 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
4630 case X86::AVX2_SETALLONES:
4631 return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
4632 case X86::AVX1_SETALLONES: {
4633 Register Reg = MIB.getReg(0);
4634 // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS.
4635 MIB->setDesc(get(X86::VCMPPSYrri));
4636 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf);
4637 return true;
4638 }
4639 case X86::AVX512_512_SETALLONES: {
4640 Register Reg = MIB.getReg(0);
4641 MIB->setDesc(get(X86::VPTERNLOGDZrri));
4642 // VPTERNLOGD needs 3 register inputs and an immediate.
4643 // 0xff will return 1s for any input.
4644 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef)
4645 .addReg(Reg, RegState::Undef).addImm(0xff);
4646 return true;
4647 }
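// For example, roughly:
//   $zmm0 = AVX512_512_SETALLONES
// becomes a ternary-logic op whose 0xff truth table yields all-ones:
//   $zmm0 = VPTERNLOGDZrri undef $zmm0, undef $zmm0, undef $zmm0, 255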
4648 case X86::AVX512_512_SEXT_MASK_32:
4649 case X86::AVX512_512_SEXT_MASK_64: {
4650 Register Reg = MIB.getReg(0);
4651 Register MaskReg = MIB.getReg(1);
4652 unsigned MaskState = getRegState(MIB->getOperand(1));
4653 unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ?
4654 X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz;
4655 MI.RemoveOperand(1);
4656 MIB->setDesc(get(Opc));
4657 // VPTERNLOG needs 3 register inputs and an immediate.
4658 // 0xff will return 1s for any input.
4659 MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState)
4660 .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff);
4661 return true;
4662 }
4663 case X86::VMOVAPSZ128rm_NOVLX:
4664 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm),
4665 get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
4666 case X86::VMOVUPSZ128rm_NOVLX:
4667 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm),
4668 get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
4669 case X86::VMOVAPSZ256rm_NOVLX:
4670 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm),
4671 get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
4672 case X86::VMOVUPSZ256rm_NOVLX:
4673 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm),
4674 get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
4675 case X86::VMOVAPSZ128mr_NOVLX:
4676 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
4677 get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
4678 case X86::VMOVUPSZ128mr_NOVLX:
4679 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
4680 get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
4681 case X86::VMOVAPSZ256mr_NOVLX:
4682 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
4683 get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
4684 case X86::VMOVUPSZ256mr_NOVLX:
4685 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
4686 get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
4687 case X86::MOV32ri64: {
4688 Register Reg = MIB.getReg(0);
4689 Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
4690 MI.setDesc(get(X86::MOV32ri));
4691 MIB->getOperand(0).setReg(Reg32);
4692 MIB.addReg(Reg, RegState::ImplicitDefine);
4693 return true;
4694 }
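// For example, roughly:
//   $rax = MOV32ri64 42
// becomes a 32-bit immediate move that relies on implicit zero-extension:
//   $eax = MOV32ri 42, implicit-def $rax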
4695
4696 // KNL does not recognize dependency-breaking idioms for mask registers,
4697 // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1.
4698 // Using %k0 as the undef input register is a performance heuristic based
4699 // on the assumption that %k0 is used less frequently than the other mask
4700 // registers, since it is not usable as a write mask.
4701 // FIXME: A more advanced approach would be to choose the best input mask
4702 // register based on context.
4703 case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0);
4704 case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0);
4705 case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0);
4706 case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0);
4707 case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0);
4708 case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0);
4709 case TargetOpcode::LOAD_STACK_GUARD:
4710 expandLoadStackGuard(MIB, *this);
4711 return true;
4712 case X86::XOR64_FP:
4713 case X86::XOR32_FP:
4714 return expandXorFP(MIB, *this);
4715 case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8));
4716 case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8));
4717 case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8));
4718 case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8));
4719 case X86::ADD8rr_DB: MIB->setDesc(get(X86::OR8rr)); break;
4720 case X86::ADD16rr_DB: MIB->setDesc(get(X86::OR16rr)); break;
4721 case X86::ADD32rr_DB: MIB->setDesc(get(X86::OR32rr)); break;
4722 case X86::ADD64rr_DB: MIB->setDesc(get(X86::OR64rr)); break;
4723 case X86::ADD8ri_DB: MIB->setDesc(get(X86::OR8ri)); break;
4724 case X86::ADD16ri_DB: MIB->setDesc(get(X86::OR16ri)); break;
4725 case X86::ADD32ri_DB: MIB->setDesc(get(X86::OR32ri)); break;
4726 case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break;
4727 case X86::ADD16ri8_DB: MIB->setDesc(get(X86::OR16ri8)); break;
4728 case X86::ADD32ri8_DB: MIB->setDesc(get(X86::OR32ri8)); break;
4729 case X86::ADD64ri8_DB: MIB->setDesc(get(X86::OR64ri8)); break;
4730 }
4731 return false;
4732 }
4733
4734 /// Return true for all instructions that only update
4735 /// the first 32 or 64 bits of the destination register and leave the rest
4736 /// unmodified. This can be used to avoid folding loads if the instructions
4737 /// only update part of the destination register, and the non-updated part is
4738 /// not needed, e.g. cvtss2sd, sqrtss. Unfolding the load from these
4739 /// instructions breaks the partial register dependency and can improve
4740 /// performance, e.g.:
4741 ///
4742 /// movss (%rdi), %xmm0
4743 /// cvtss2sd %xmm0, %xmm0
4744 ///
4745 /// Instead of
4746 /// cvtss2sd (%rdi), %xmm0
4747 ///
4748 /// FIXME: This should be turned into a TSFlags.
4749 ///
4750 static bool hasPartialRegUpdate(unsigned Opcode,
4751 const X86Subtarget &Subtarget,
4752 bool ForLoadFold = false) {
4753 switch (Opcode) {
4754 case X86::CVTSI2SSrr:
4755 case X86::CVTSI2SSrm:
4756 case X86::CVTSI642SSrr:
4757 case X86::CVTSI642SSrm:
4758 case X86::CVTSI2SDrr:
4759 case X86::CVTSI2SDrm:
4760 case X86::CVTSI642SDrr:
4761 case X86::CVTSI642SDrm:
4762 // Load folding won't affect the undef register update since the input is
4763 // a GPR.
4764 return !ForLoadFold;
4765 case X86::CVTSD2SSrr:
4766 case X86::CVTSD2SSrm:
4767 case X86::CVTSS2SDrr:
4768 case X86::CVTSS2SDrm:
4769 case X86::MOVHPDrm:
4770 case X86::MOVHPSrm:
4771 case X86::MOVLPDrm:
4772 case X86::MOVLPSrm:
4773 case X86::RCPSSr:
4774 case X86::RCPSSm:
4775 case X86::RCPSSr_Int:
4776 case X86::RCPSSm_Int:
4777 case X86::ROUNDSDr:
4778 case X86::ROUNDSDm:
4779 case X86::ROUNDSSr:
4780 case X86::ROUNDSSm:
4781 case X86::RSQRTSSr:
4782 case X86::RSQRTSSm:
4783 case X86::RSQRTSSr_Int:
4784 case X86::RSQRTSSm_Int:
4785 case X86::SQRTSSr:
4786 case X86::SQRTSSm:
4787 case X86::SQRTSSr_Int:
4788 case X86::SQRTSSm_Int:
4789 case X86::SQRTSDr:
4790 case X86::SQRTSDm:
4791 case X86::SQRTSDr_Int:
4792 case X86::SQRTSDm_Int:
4793 return true;
4794 // GPR
4795 case X86::POPCNT32rm:
4796 case X86::POPCNT32rr:
4797 case X86::POPCNT64rm:
4798 case X86::POPCNT64rr:
4799 return Subtarget.hasPOPCNTFalseDeps();
4800 case X86::LZCNT32rm:
4801 case X86::LZCNT32rr:
4802 case X86::LZCNT64rm:
4803 case X86::LZCNT64rr:
4804 case X86::TZCNT32rm:
4805 case X86::TZCNT32rr:
4806 case X86::TZCNT64rm:
4807 case X86::TZCNT64rr:
4808 return Subtarget.hasLZCNTFalseDeps();
4809 }
4810
4811 return false;
4812 }
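// For example, on subtargets with hasPOPCNTFalseDeps(), roughly:
//   popcnt %rdi, %rax
// carries a false dependence on %rax even though the destination is
// architecturally write-only, so BreakFalseDeps may insert a zeroing idiom
// (see breakPartialRegDependency below) ahead of it.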
4813
4814 /// Inform the BreakFalseDeps pass how many idle
4815 /// instructions we would like before a partial register update.
4816 unsigned X86InstrInfo::getPartialRegUpdateClearance(
4817 const MachineInstr &MI, unsigned OpNum,
4818 const TargetRegisterInfo *TRI) const {
4819 if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget))
4820 return 0;
4821
4822 // If MI is marked as reading Reg, the partial register update is wanted.
4823 const MachineOperand &MO = MI.getOperand(0);
4824 Register Reg = MO.getReg();
4825 if (Register::isVirtualRegister(Reg)) {
4826 if (MO.readsReg() || MI.readsVirtualRegister(Reg))
4827 return 0;
4828 } else {
4829 if (MI.readsRegister(Reg, TRI))
4830 return 0;
4831 }
4832
4833 // If any instructions in the clearance range are reading Reg, insert a
4834 // dependency-breaking instruction, which is inexpensive and is likely to
4835 // be hidden in other instructions' cycles.
4836 return PartialRegUpdateClearance;
4837 }
4838
4839 // Return true for any instruction that copies the high bits of the first source
4840 // operand into the unused high bits of the destination operand.
4841 // Also returns true for instructions that have two inputs where one may
4842 // be undef and we want it to use the same register as the other input.
4843 static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
4844 bool ForLoadFold = false) {
4845 // Set the OpNum parameter to the first source operand.
4846 switch (Opcode) {
4847 case X86::MMX_PUNPCKHBWirr:
4848 case X86::MMX_PUNPCKHWDirr:
4849 case X86::MMX_PUNPCKHDQirr:
4850 case X86::MMX_PUNPCKLBWirr:
4851 case X86::MMX_PUNPCKLWDirr:
4852 case X86::MMX_PUNPCKLDQirr:
4853 case X86::MOVHLPSrr:
4854 case X86::PACKSSWBrr:
4855 case X86::PACKUSWBrr:
4856 case X86::PACKSSDWrr:
4857 case X86::PACKUSDWrr:
4858 case X86::PUNPCKHBWrr:
4859 case X86::PUNPCKLBWrr:
4860 case X86::PUNPCKHWDrr:
4861 case X86::PUNPCKLWDrr:
4862 case X86::PUNPCKHDQrr:
4863 case X86::PUNPCKLDQrr:
4864 case X86::PUNPCKHQDQrr:
4865 case X86::PUNPCKLQDQrr:
4866 case X86::SHUFPDrri:
4867 case X86::SHUFPSrri:
4868 // These instructions are sometimes used with an undef first or second
4869 // source. Return true here so BreakFalseDeps will assign this source to the
4870 // same register as the first source to avoid a false dependency.
4871 // Operand 1 of these instructions is tied so they're separate from their
4872 // VEX counterparts.
4873 return OpNum == 2 && !ForLoadFold;
4874
4875 case X86::VMOVLHPSrr:
4876 case X86::VMOVLHPSZrr:
4877 case X86::VPACKSSWBrr:
4878 case X86::VPACKUSWBrr:
4879 case X86::VPACKSSDWrr:
4880 case X86::VPACKUSDWrr:
4881 case X86::VPACKSSWBZ128rr:
4882 case X86::VPACKUSWBZ128rr:
4883 case X86::VPACKSSDWZ128rr:
4884 case X86::VPACKUSDWZ128rr:
4885 case X86::VPERM2F128rr:
4886 case X86::VPERM2I128rr:
4887 case X86::VSHUFF32X4Z256rri:
4888 case X86::VSHUFF32X4Zrri:
4889 case X86::VSHUFF64X2Z256rri:
4890 case X86::VSHUFF64X2Zrri:
4891 case X86::VSHUFI32X4Z256rri:
4892 case X86::VSHUFI32X4Zrri:
4893 case X86::VSHUFI64X2Z256rri:
4894 case X86::VSHUFI64X2Zrri:
4895 case X86::VPUNPCKHBWrr:
4896 case X86::VPUNPCKLBWrr:
4897 case X86::VPUNPCKHBWYrr:
4898 case X86::VPUNPCKLBWYrr:
4899 case X86::VPUNPCKHBWZ128rr:
4900 case X86::VPUNPCKLBWZ128rr:
4901 case X86::VPUNPCKHBWZ256rr:
4902 case X86::VPUNPCKLBWZ256rr:
4903 case X86::VPUNPCKHBWZrr:
4904 case X86::VPUNPCKLBWZrr:
4905 case X86::VPUNPCKHWDrr:
4906 case X86::VPUNPCKLWDrr:
4907 case X86::VPUNPCKHWDYrr:
4908 case X86::VPUNPCKLWDYrr:
4909 case X86::VPUNPCKHWDZ128rr:
4910 case X86::VPUNPCKLWDZ128rr:
4911 case X86::VPUNPCKHWDZ256rr:
4912 case X86::VPUNPCKLWDZ256rr:
4913 case X86::VPUNPCKHWDZrr:
4914 case X86::VPUNPCKLWDZrr:
4915 case X86::VPUNPCKHDQrr:
4916 case X86::VPUNPCKLDQrr:
4917 case X86::VPUNPCKHDQYrr:
4918 case X86::VPUNPCKLDQYrr:
4919 case X86::VPUNPCKHDQZ128rr:
4920 case X86::VPUNPCKLDQZ128rr:
4921 case X86::VPUNPCKHDQZ256rr:
4922 case X86::VPUNPCKLDQZ256rr:
4923 case X86::VPUNPCKHDQZrr:
4924 case X86::VPUNPCKLDQZrr:
4925 case X86::VPUNPCKHQDQrr:
4926 case X86::VPUNPCKLQDQrr:
4927 case X86::VPUNPCKHQDQYrr:
4928 case X86::VPUNPCKLQDQYrr:
4929 case X86::VPUNPCKHQDQZ128rr:
4930 case X86::VPUNPCKLQDQZ128rr:
4931 case X86::VPUNPCKHQDQZ256rr:
4932 case X86::VPUNPCKLQDQZ256rr:
4933 case X86::VPUNPCKHQDQZrr:
4934 case X86::VPUNPCKLQDQZrr:
4935 // These instructions are sometimes used with an undef first or second
4936 // source. Return true here so BreakFalseDeps will assign this source to the
4937 // same register as the first source to avoid a false dependency.
4938 return (OpNum == 1 || OpNum == 2) && !ForLoadFold;
4939
4940 case X86::VCVTSI2SSrr:
4941 case X86::VCVTSI2SSrm:
4942 case X86::VCVTSI2SSrr_Int:
4943 case X86::VCVTSI2SSrm_Int:
4944 case X86::VCVTSI642SSrr:
4945 case X86::VCVTSI642SSrm:
4946 case X86::VCVTSI642SSrr_Int:
4947 case X86::VCVTSI642SSrm_Int:
4948 case X86::VCVTSI2SDrr:
4949 case X86::VCVTSI2SDrm:
4950 case X86::VCVTSI2SDrr_Int:
4951 case X86::VCVTSI2SDrm_Int:
4952 case X86::VCVTSI642SDrr:
4953 case X86::VCVTSI642SDrm:
4954 case X86::VCVTSI642SDrr_Int:
4955 case X86::VCVTSI642SDrm_Int:
4956 // AVX-512
4957 case X86::VCVTSI2SSZrr:
4958 case X86::VCVTSI2SSZrm:
4959 case X86::VCVTSI2SSZrr_Int:
4960 case X86::VCVTSI2SSZrrb_Int:
4961 case X86::VCVTSI2SSZrm_Int:
4962 case X86::VCVTSI642SSZrr:
4963 case X86::VCVTSI642SSZrm:
4964 case X86::VCVTSI642SSZrr_Int:
4965 case X86::VCVTSI642SSZrrb_Int:
4966 case X86::VCVTSI642SSZrm_Int:
4967 case X86::VCVTSI2SDZrr:
4968 case X86::VCVTSI2SDZrm:
4969 case X86::VCVTSI2SDZrr_Int:
4970 case X86::VCVTSI2SDZrm_Int:
4971 case X86::VCVTSI642SDZrr:
4972 case X86::VCVTSI642SDZrm:
4973 case X86::VCVTSI642SDZrr_Int:
4974 case X86::VCVTSI642SDZrrb_Int:
4975 case X86::VCVTSI642SDZrm_Int:
4976 case X86::VCVTUSI2SSZrr:
4977 case X86::VCVTUSI2SSZrm:
4978 case X86::VCVTUSI2SSZrr_Int:
4979 case X86::VCVTUSI2SSZrrb_Int:
4980 case X86::VCVTUSI2SSZrm_Int:
4981 case X86::VCVTUSI642SSZrr:
4982 case X86::VCVTUSI642SSZrm:
4983 case X86::VCVTUSI642SSZrr_Int:
4984 case X86::VCVTUSI642SSZrrb_Int:
4985 case X86::VCVTUSI642SSZrm_Int:
4986 case X86::VCVTUSI2SDZrr:
4987 case X86::VCVTUSI2SDZrm:
4988 case X86::VCVTUSI2SDZrr_Int:
4989 case X86::VCVTUSI2SDZrm_Int:
4990 case X86::VCVTUSI642SDZrr:
4991 case X86::VCVTUSI642SDZrm:
4992 case X86::VCVTUSI642SDZrr_Int:
4993 case X86::VCVTUSI642SDZrrb_Int:
4994 case X86::VCVTUSI642SDZrm_Int:
4995 // Load folding won't affect the undef register update since the input is
4996 // a GPR.
4997 return OpNum == 1 && !ForLoadFold;
4998 case X86::VCVTSD2SSrr:
4999 case X86::VCVTSD2SSrm:
5000 case X86::VCVTSD2SSrr_Int:
5001 case X86::VCVTSD2SSrm_Int:
5002 case X86::VCVTSS2SDrr:
5003 case X86::VCVTSS2SDrm:
5004 case X86::VCVTSS2SDrr_Int:
5005 case X86::VCVTSS2SDrm_Int:
5006 case X86::VRCPSSr:
5007 case X86::VRCPSSr_Int:
5008 case X86::VRCPSSm:
5009 case X86::VRCPSSm_Int:
5010 case X86::VROUNDSDr:
5011 case X86::VROUNDSDm:
5012 case X86::VROUNDSDr_Int:
5013 case X86::VROUNDSDm_Int:
5014 case X86::VROUNDSSr:
5015 case X86::VROUNDSSm:
5016 case X86::VROUNDSSr_Int:
5017 case X86::VROUNDSSm_Int:
5018 case X86::VRSQRTSSr:
5019 case X86::VRSQRTSSr_Int:
5020 case X86::VRSQRTSSm:
5021 case X86::VRSQRTSSm_Int:
5022 case X86::VSQRTSSr:
5023 case X86::VSQRTSSr_Int:
5024 case X86::VSQRTSSm:
5025 case X86::VSQRTSSm_Int:
5026 case X86::VSQRTSDr:
5027 case X86::VSQRTSDr_Int:
5028 case X86::VSQRTSDm:
5029 case X86::VSQRTSDm_Int:
5030 // AVX-512
5031 case X86::VCVTSD2SSZrr:
5032 case X86::VCVTSD2SSZrr_Int:
5033 case X86::VCVTSD2SSZrrb_Int:
5034 case X86::VCVTSD2SSZrm:
5035 case X86::VCVTSD2SSZrm_Int:
5036 case X86::VCVTSS2SDZrr:
5037 case X86::VCVTSS2SDZrr_Int:
5038 case X86::VCVTSS2SDZrrb_Int:
5039 case X86::VCVTSS2SDZrm:
5040 case X86::VCVTSS2SDZrm_Int:
5041 case X86::VGETEXPSDZr:
5042 case X86::VGETEXPSDZrb:
5043 case X86::VGETEXPSDZm:
5044 case X86::VGETEXPSSZr:
5045 case X86::VGETEXPSSZrb:
5046 case X86::VGETEXPSSZm:
5047 case X86::VGETMANTSDZrri:
5048 case X86::VGETMANTSDZrrib:
5049 case X86::VGETMANTSDZrmi:
5050 case X86::VGETMANTSSZrri:
5051 case X86::VGETMANTSSZrrib:
5052 case X86::VGETMANTSSZrmi:
5053 case X86::VRNDSCALESDZr:
5054 case X86::VRNDSCALESDZr_Int:
5055 case X86::VRNDSCALESDZrb_Int:
5056 case X86::VRNDSCALESDZm:
5057 case X86::VRNDSCALESDZm_Int:
5058 case X86::VRNDSCALESSZr:
5059 case X86::VRNDSCALESSZr_Int:
5060 case X86::VRNDSCALESSZrb_Int:
5061 case X86::VRNDSCALESSZm:
5062 case X86::VRNDSCALESSZm_Int:
5063 case X86::VRCP14SDZrr:
5064 case X86::VRCP14SDZrm:
5065 case X86::VRCP14SSZrr:
5066 case X86::VRCP14SSZrm:
5067 case X86::VRCP28SDZr:
5068 case X86::VRCP28SDZrb:
5069 case X86::VRCP28SDZm:
5070 case X86::VRCP28SSZr:
5071 case X86::VRCP28SSZrb:
5072 case X86::VRCP28SSZm:
5073 case X86::VREDUCESSZrmi:
5074 case X86::VREDUCESSZrri:
5075 case X86::VREDUCESSZrrib:
5076 case X86::VRSQRT14SDZrr:
5077 case X86::VRSQRT14SDZrm:
5078 case X86::VRSQRT14SSZrr:
5079 case X86::VRSQRT14SSZrm:
5080 case X86::VRSQRT28SDZr:
5081 case X86::VRSQRT28SDZrb:
5082 case X86::VRSQRT28SDZm:
5083 case X86::VRSQRT28SSZr:
5084 case X86::VRSQRT28SSZrb:
5085 case X86::VRSQRT28SSZm:
5086 case X86::VSQRTSSZr:
5087 case X86::VSQRTSSZr_Int:
5088 case X86::VSQRTSSZrb_Int:
5089 case X86::VSQRTSSZm:
5090 case X86::VSQRTSSZm_Int:
5091 case X86::VSQRTSDZr:
5092 case X86::VSQRTSDZr_Int:
5093 case X86::VSQRTSDZrb_Int:
5094 case X86::VSQRTSDZm:
5095 case X86::VSQRTSDZm_Int:
5096 return OpNum == 1;
5097 case X86::VMOVSSZrrk:
5098 case X86::VMOVSDZrrk:
5099 return OpNum == 3 && !ForLoadFold;
5100 case X86::VMOVSSZrrkz:
5101 case X86::VMOVSDZrrkz:
5102 return OpNum == 2 && !ForLoadFold;
5103 }
5104
5105 return false;
5106 }
5107
5108 /// Inform the BreakFalseDeps pass how many idle instructions we would like
5109 /// before certain undef register reads.
5110 ///
5111 /// This catches the VCVTSI2SD family of instructions:
5112 ///
5113 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
5114 ///
5115 /// We should be careful *not* to catch VXOR idioms which are presumably
5116 /// handled specially in the pipeline:
5117 ///
5118 /// vxorps undef %xmm1, undef %xmm1, %xmm1
5119 ///
5120 /// Like getPartialRegUpdateClearance, this makes a strong assumption that the
5121 /// high bits that are passed-through are not live.
5122 unsigned
5123 X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
5124 const TargetRegisterInfo *TRI) const {
5125 for (unsigned i = MI.getNumExplicitDefs(), e = MI.getNumExplicitOperands();
5126 i != e; ++i) {
5127 const MachineOperand &MO = MI.getOperand(i);
5128 if (MO.isReg() && MO.isUndef() &&
5129 Register::isPhysicalRegister(MO.getReg()) &&
5130 hasUndefRegUpdate(MI.getOpcode(), i)) {
5131 OpNum = i;
5132 return UndefRegClearance;
5133 }
5134 }
5135
5136 return 0;
5137 }
5138
5139 void X86InstrInfo::breakPartialRegDependency(
5140 MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
5141 Register Reg = MI.getOperand(OpNum).getReg();
5142 // If MI kills this register, the false dependence is already broken.
5143 if (MI.killsRegister(Reg, TRI))
5144 return;
5145
5146 if (X86::VR128RegClass.contains(Reg)) {
5147 // These instructions are all floating point domain, so xorps is the best
5148 // choice.
5149 unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
5150 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
5151 .addReg(Reg, RegState::Undef)
5152 .addReg(Reg, RegState::Undef);
5153 MI.addRegisterKilled(Reg, TRI, true);
5154 } else if (X86::VR256RegClass.contains(Reg)) {
5155 // Use vxorps to clear the full ymm register.
5156 // It wants to read and write the xmm sub-register.
5157 Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
5158 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
5159 .addReg(XReg, RegState::Undef)
5160 .addReg(XReg, RegState::Undef)
5161 .addReg(Reg, RegState::ImplicitDefine);
5162 MI.addRegisterKilled(Reg, TRI, true);
5163 } else if (X86::GR64RegClass.contains(Reg)) {
5164 // Use XOR32rr because it has a shorter encoding and zeroes the upper bits
5165 // as well.
5166 Register XReg = TRI->getSubReg(Reg, X86::sub_32bit);
5167 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
5168 .addReg(XReg, RegState::Undef)
5169 .addReg(XReg, RegState::Undef)
5170 .addReg(Reg, RegState::ImplicitDefine);
5171 MI.addRegisterKilled(Reg, TRI, true);
5172 } else if (X86::GR32RegClass.contains(Reg)) {
5173 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg)
5174 .addReg(Reg, RegState::Undef)
5175 .addReg(Reg, RegState::Undef);
5176 MI.addRegisterKilled(Reg, TRI, true);
5177 }
5178 }
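// For example, for the VR128 case above on an SSE-only subtarget, a partial
// write such as:
//   $xmm0 = CVTSS2SDrr $xmm1
// gets a dependency-breaking zero idiom inserted immediately before it:
//   $xmm0 = XORPSrr undef $xmm0, undef $xmm0
//   $xmm0 = CVTSS2SDrr $xmm1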
5179
5180 static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
5181 int PtrOffset = 0) {
5182 unsigned NumAddrOps = MOs.size();
5183
5184 if (NumAddrOps < 4) {
5185 // FrameIndex only - add an immediate offset (whether it's zero or not).
5186 for (unsigned i = 0; i != NumAddrOps; ++i)
5187 MIB.add(MOs[i]);
5188 addOffset(MIB, PtrOffset);
5189 } else {
5190 // General Memory Addressing - we need to add any offset to an existing
5191 // offset.
5192 assert(MOs.size() == 5 && "Unexpected memory operand list length");
5193 for (unsigned i = 0; i != NumAddrOps; ++i) {
5194 const MachineOperand &MO = MOs[i];
5195 if (i == 3 && PtrOffset != 0) {
5196 MIB.addDisp(MO, PtrOffset);
5197 } else {
5198 MIB.add(MO);
5199 }
5200 }
5201 }
5202 }
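// A sketch of the two shapes handled above: a frame-index reference arrives
// with fewer than four operands and gets an explicit displacement appended,
// while a full x86 memory reference arrives as all five address operands:
//   [BaseReg, ScaleAmt, IndexReg, Disp, Segment]
// and only has its displacement (index 3) adjusted by PtrOffset.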
5203
5204 static void updateOperandRegConstraints(MachineFunction &MF,
5205 MachineInstr &NewMI,
5206 const TargetInstrInfo &TII) {
5207 MachineRegisterInfo &MRI = MF.getRegInfo();
5208 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
5209
5210 for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
5211 MachineOperand &MO = NewMI.getOperand(Idx);
5212 // We only need to update constraints on virtual register operands.
5213 if (!MO.isReg())
5214 continue;
5215 Register Reg = MO.getReg();
5216 if (!Register::isVirtualRegister(Reg))
5217 continue;
5218
5219 auto *NewRC = MRI.constrainRegClass(
5220 Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF));
5221 if (!NewRC) {
5222 LLVM_DEBUG(
5223 dbgs() << "WARNING: Unable to update register constraint for operand "
5224 << Idx << " of instruction:\n";
5225 NewMI.dump(); dbgs() << "\n");
5226 }
5227 }
5228 }
5229
5230 static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
5231 ArrayRef<MachineOperand> MOs,
5232 MachineBasicBlock::iterator InsertPt,
5233 MachineInstr &MI,
5234 const TargetInstrInfo &TII) {
5235 // Create the base instruction with the memory operand as the first part.
5236 // Omit the implicit operands, something BuildMI can't do.
5237 MachineInstr *NewMI =
5238 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
5239 MachineInstrBuilder MIB(MF, NewMI);
5240 addOperands(MIB, MOs);
5241
5242 // Loop over the rest of the ri operands, converting them over.
5243 unsigned NumOps = MI.getDesc().getNumOperands() - 2;
5244 for (unsigned i = 0; i != NumOps; ++i) {
5245 MachineOperand &MO = MI.getOperand(i + 2);
5246 MIB.add(MO);
5247 }
5248 for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
5249 MachineOperand &MO = MI.getOperand(i);
5250 MIB.add(MO);
5251 }
5252
5253 updateOperandRegConstraints(MF, *NewMI, TII);
5254
5255 MachineBasicBlock *MBB = InsertPt->getParent();
5256 MBB->insert(InsertPt, NewMI);
5257
5258 return MIB;
5259 }
5260
5261 static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
5262 unsigned OpNo, ArrayRef<MachineOperand> MOs,
5263 MachineBasicBlock::iterator InsertPt,
5264 MachineInstr &MI, const TargetInstrInfo &TII,
5265 int PtrOffset = 0) {
5266 // Omit the implicit operands, something BuildMI can't do.
5267 MachineInstr *NewMI =
5268 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
5269 MachineInstrBuilder MIB(MF, NewMI);
5270
5271 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
5272 MachineOperand &MO = MI.getOperand(i);
5273 if (i == OpNo) {
5274 assert(MO.isReg() && "Expected to fold into reg operand!");
5275 addOperands(MIB, MOs, PtrOffset);
5276 } else {
5277 MIB.add(MO);
5278 }
5279 }
5280
5281 updateOperandRegConstraints(MF, *NewMI, TII);
5282
5283 // Copy the NoFPExcept flag from the instruction we're fusing.
5284 if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
5285 NewMI->setFlag(MachineInstr::MIFlag::NoFPExcept);
5286
5287 MachineBasicBlock *MBB = InsertPt->getParent();
5288 MBB->insert(InsertPt, NewMI);
5289
5290 return MIB;
5291 }
5292
5293 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
5294 ArrayRef<MachineOperand> MOs,
5295 MachineBasicBlock::iterator InsertPt,
5296 MachineInstr &MI) {
5297 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
5298 MI.getDebugLoc(), TII.get(Opcode));
5299 addOperands(MIB, MOs);
5300 return MIB.addImm(0);
5301 }
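// MakeM0Inst is used below when the def of a MOV32r0 is spilled: instead of
// materializing zero in a register and storing it, the fold emits a direct
// store of a zero immediate, roughly:
//   MOV32mi <fi#0>, 1, $noreg, 0, $noreg, 0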
5302
5303 MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
5304 MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
5305 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
5306 unsigned Size, Align Alignment) const {
5307 switch (MI.getOpcode()) {
5308 case X86::INSERTPSrr:
5309 case X86::VINSERTPSrr:
5310 case X86::VINSERTPSZrr:
5311 // Attempt to convert the load of the inserted vector element into a folded
5312 // load of a single float.
5313 if (OpNum == 2) {
5314 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
5315 unsigned ZMask = Imm & 15;
5316 unsigned DstIdx = (Imm >> 4) & 3;
5317 unsigned SrcIdx = (Imm >> 6) & 3;
5318
5319 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5320 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5321 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5322 if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(4)) {
5323 int PtrOffset = SrcIdx * 4;
5324 unsigned NewImm = (DstIdx << 4) | ZMask;
5325 unsigned NewOpCode =
5326 (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm :
5327 (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm :
5328 X86::INSERTPSrm;
5329 MachineInstr *NewMI =
5330 FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
5331 NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
5332 return NewMI;
5333 }
5334 }
5335 break;
5336 case X86::MOVHLPSrr:
5337 case X86::VMOVHLPSrr:
5338 case X86::VMOVHLPSZrr:
5339 // Move the upper 64 bits of the second operand to the lower 64 bits.
5340 // To fold the load, adjust the pointer to the upper half and use (V)MOVLPS.
5341 // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
5342 if (OpNum == 2) {
5343 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5344 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5345 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5346 if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
5347 unsigned NewOpCode =
5348 (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
5349 (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm :
5350 X86::MOVLPSrm;
5351 MachineInstr *NewMI =
5352 FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
5353 return NewMI;
5354 }
5355 }
5356 break;
5357 case X86::UNPCKLPDrr:
5358 // If we won't be able to fold this to the memory form of UNPCKL, use
5359 // MOVHPD instead. Handled as a custom fold because we can't have this
5360 // opcode in the load table twice.
5361 if (OpNum == 2) {
5362 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5363 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5364 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5365 if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
5366 MachineInstr *NewMI =
5367 FuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
5368 return NewMI;
5369 }
5370 }
5371 break;
5372 }
5373
5374 return nullptr;
5375 }
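// For example, for the INSERTPS case above: if $xmm1 was loaded from (%rdi)
// and the immediate selects source element 2 (SrcIdx == 2), then, roughly:
//   vinsertps $0x90, %xmm1, %xmm0, %xmm0    # DstIdx = 1, SrcIdx = 2
// can be refolded as a 4-byte load at offset SrcIdx * 4 = 8:
//   vinsertps $0x10, 8(%rdi), %xmm0, %xmm0  # DstIdx = 1, SrcIdx now 0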
5376
5377 static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
5378 MachineInstr &MI) {
5379 if (!hasUndefRegUpdate(MI.getOpcode(), 1, /*ForLoadFold*/true) ||
5380 !MI.getOperand(1).isReg())
5381 return false;
5382
5383 // There are two cases we need to handle depending on where in the pipeline
5384 // the folding attempt is being made.
5385 // - The register has the undef flag set.
5386 // - The register is produced by the IMPLICIT_DEF instruction.
5387
5388 if (MI.getOperand(1).isUndef())
5389 return true;
5390
5391 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5392 MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg());
5393 return VRegDef && VRegDef->isImplicitDef();
5394 }
5395
5396 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
5397 MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
5398 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
5399 unsigned Size, Align Alignment, bool AllowCommute) const {
5400 bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
5401 bool isTwoAddrFold = false;
5402
5403 // For CPUs that favor the register form of a call or push,
5404 // do not fold loads into calls or pushes, unless optimizing for size
5405 // aggressively.
5406 if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() &&
5407 (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
5408 MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
5409 MI.getOpcode() == X86::PUSH64r))
5410 return nullptr;
5411
5412 // Avoid partial and undef register update stalls unless optimizing for size.
5413 if (!MF.getFunction().hasOptSize() &&
5414 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5415 shouldPreventUndefRegUpdateMemFold(MF, MI)))
5416 return nullptr;
5417
5418 unsigned NumOps = MI.getDesc().getNumOperands();
5419 bool isTwoAddr =
5420 NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
5421
5422 // FIXME: AsmPrinter doesn't know how to handle
5423 // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
5424 if (MI.getOpcode() == X86::ADD32ri &&
5425 MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
5426 return nullptr;
5427
5428 // GOTTPOFF relocation loads can only be folded into add instructions.
5429 // FIXME: Need to exclude other relocations that only support specific
5430 // instructions.
5431 if (MOs.size() == X86::AddrNumOperands &&
5432 MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
5433 MI.getOpcode() != X86::ADD64rr)
5434 return nullptr;
5435
5436 MachineInstr *NewMI = nullptr;
5437
5438 // Attempt to fold any custom cases we have.
5439 if (MachineInstr *CustomMI = foldMemoryOperandCustom(
5440 MF, MI, OpNum, MOs, InsertPt, Size, Alignment))
5441 return CustomMI;
5442
5443 const X86MemoryFoldTableEntry *I = nullptr;
5444
5445 // Folding a memory location into the two-address part of a two-address
5446 // instruction is different from folding it elsewhere. It requires
5447 // replacing the *two* registers with the memory location.
5448 if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() &&
5449 MI.getOperand(1).isReg() &&
5450 MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
5451 I = lookupTwoAddrFoldTable(MI.getOpcode());
5452 isTwoAddrFold = true;
5453 } else {
5454 if (OpNum == 0) {
5455 if (MI.getOpcode() == X86::MOV32r0) {
5456 NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
5457 if (NewMI)
5458 return NewMI;
5459 }
5460 }
5461
5462 I = lookupFoldTable(MI.getOpcode(), OpNum);
5463 }
5464
5465 if (I != nullptr) {
5466 unsigned Opcode = I->DstOp;
5467 MaybeAlign MinAlign =
5468 decodeMaybeAlign((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT);
5469 if (MinAlign && Alignment < *MinAlign)
5470 return nullptr;
5471 bool NarrowToMOV32rm = false;
5472 if (Size) {
5473 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5474 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum,
5475 &RI, MF);
5476 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5477 if (Size < RCSize) {
5478 // FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int.
5479 // Check if it's safe to fold the load. If the size of the object is
5480 // narrower than the load width, then it's not.
5481 if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
5482 return nullptr;
5483 // If this is a 64-bit load, but the spill slot is 32 bits, then we can do
5484 // a 32-bit load which is implicitly zero-extended. This likely is
5485 // due to live-interval analysis rematerializing a load from a stack slot.
5486 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
5487 return nullptr;
5488 Opcode = X86::MOV32rm;
5489 NarrowToMOV32rm = true;
5490 }
5491 }
5492
5493 if (isTwoAddrFold)
5494 NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
5495 else
5496 NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
5497
5498 if (NarrowToMOV32rm) {
5499 // This is the special case where we use a MOV32rm to load a 32-bit
5500 // value and zero-extend the top bits. Change the destination register
5501 // to a 32-bit one.
5502 Register DstReg = NewMI->getOperand(0).getReg();
5503 if (Register::isPhysicalRegister(DstReg))
5504 NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
5505 else
5506 NewMI->getOperand(0).setSubReg(X86::sub_32bit);
5507 }
5508 return NewMI;
5509 }
5510
5511 // If the instruction and target operand are commutable, commute the
5512 // instruction and try again.
5513 if (AllowCommute) {
5514 unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
5515 if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
5516 bool HasDef = MI.getDesc().getNumDefs();
5517 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
5518 Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
5519 Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
5520 bool Tied1 =
5521 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
5522 bool Tied2 =
5523 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
5524
5525 // If either of the commutable operands is tied to the destination
5526 // then we cannot commute + fold.
5527 if ((HasDef && Reg0 == Reg1 && Tied1) ||
5528 (HasDef && Reg0 == Reg2 && Tied2))
5529 return nullptr;
5530
5531 MachineInstr *CommutedMI =
5532 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
5533 if (!CommutedMI) {
5534 // Unable to commute.
5535 return nullptr;
5536 }
5537 if (CommutedMI != &MI) {
5538 // New instruction. We can't fold from this.
5539 CommutedMI->eraseFromParent();
5540 return nullptr;
5541 }
5542
5543 // Attempt to fold with the commuted version of the instruction.
5544 NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, Size,
5545 Alignment, /*AllowCommute=*/false);
5546 if (NewMI)
5547 return NewMI;
5548
5549 // Folding failed again - undo the commute before returning.
5550 MachineInstr *UncommutedMI =
5551 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
5552 if (!UncommutedMI) {
5553 // Unable to commute.
5554 return nullptr;
5555 }
5556 if (UncommutedMI != &MI) {
5557 // New instruction. It doesn't need to be kept.
5558 UncommutedMI->eraseFromParent();
5559 return nullptr;
5560 }
5561
5562 // Return here to prevent a duplicate fuse failure report.
5563 return nullptr;
5564 }
5565 }
5566
5567 // No fusion
5568 if (PrintFailedFusing && !MI.isCopy())
5569 dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
5570 return nullptr;
5571 }
5572
5573 MachineInstr *
5574 X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
5575 ArrayRef<unsigned> Ops,
5576 MachineBasicBlock::iterator InsertPt,
5577 int FrameIndex, LiveIntervals *LIS,
5578 VirtRegMap *VRM) const {
5579 // Check switch flag
5580 if (NoFusing)
5581 return nullptr;
5582
5583 // Avoid partial and undef register update stalls unless optimizing for size.
5584 if (!MF.getFunction().hasOptSize() &&
5585 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5586 shouldPreventUndefRegUpdateMemFold(MF, MI)))
5587 return nullptr;
5588
5589 // Don't fold subreg spills, or reloads that use a high subreg.
5590 for (auto Op : Ops) {
5591 MachineOperand &MO = MI.getOperand(Op);
5592 auto SubReg = MO.getSubReg();
5593 if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi))
5594 return nullptr;
5595 }
5596
5597 const MachineFrameInfo &MFI = MF.getFrameInfo();
5598 unsigned Size = MFI.getObjectSize(FrameIndex);
5599 Align Alignment = MFI.getObjectAlign(FrameIndex);
5600 // If the function stack isn't realigned we don't want to fold instructions
5601 // that need increased alignment.
5602 if (!RI.needsStackRealignment(MF))
5603 Alignment =
5604 std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign());
5605 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
5606 unsigned NewOpc = 0;
5607 unsigned RCSize = 0;
5608 switch (MI.getOpcode()) {
5609 default: return nullptr;
5610 case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
5611 case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
5612 case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
5613 case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
5614 }
5615 // Check if it's safe to fold the load. If the size of the object is
5616 // narrower than the load width, then it's not.
5617 if (Size < RCSize)
5618 return nullptr;
5619 // Change to CMPXXri r, 0 first.
5620 MI.setDesc(get(NewOpc));
5621 MI.getOperand(1).ChangeToImmediate(0);
5622 } else if (Ops.size() != 1)
5623 return nullptr;
5624
5625 return foldMemoryOperandImpl(MF, MI, Ops[0],
5626 MachineOperand::CreateFI(FrameIndex), InsertPt,
5627 Size, Alignment, /*AllowCommute=*/true);
5628 }
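// For example, a test of a register whose live range is spilled to fi#0,
// roughly:
//   TEST32rr %reg, %reg
// is first rewritten to CMP32ri8 %reg, 0 and then folded to:
//   CMP32mi8 <fi#0>, 1, $noreg, 0, $noreg, 0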
5629
5630 /// Check if \p LoadMI is a partial register load that we can't fold into \p MI
5631 /// because the latter uses contents that wouldn't be defined in the folded
5632 /// version. For instance, this transformation isn't legal:
5633 /// movss (%rdi), %xmm0
5634 /// addps %xmm0, %xmm0
5635 /// ->
5636 /// addps (%rdi), %xmm0
5637 ///
5638 /// But this one is:
5639 /// movss (%rdi), %xmm0
5640 /// addss %xmm0, %xmm0
5641 /// ->
5642 /// addss (%rdi), %xmm0
5643 ///
5644 static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
5645 const MachineInstr &UserMI,
5646 const MachineFunction &MF) {
5647 unsigned Opc = LoadMI.getOpcode();
5648 unsigned UserOpc = UserMI.getOpcode();
5649 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5650 const TargetRegisterClass *RC =
5651 MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
5652 unsigned RegSize = TRI.getRegSizeInBits(*RC);
5653
5654 if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm ||
5655 Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt ||
5656 Opc == X86::VMOVSSZrm_alt) &&
5657 RegSize > 32) {
5658 // These instructions only load 32 bits, so we can't fold them if the
5659 // destination register is wider than 32 bits (4 bytes) and the user
5660 // instruction isn't scalar (SS).
5661 switch (UserOpc) {
5662 case X86::CVTSS2SDrr_Int:
5663 case X86::VCVTSS2SDrr_Int:
5664 case X86::VCVTSS2SDZrr_Int:
5665 case X86::VCVTSS2SDZrr_Intk:
5666 case X86::VCVTSS2SDZrr_Intkz:
5667 case X86::CVTSS2SIrr_Int: case X86::CVTSS2SI64rr_Int:
5668 case X86::VCVTSS2SIrr_Int: case X86::VCVTSS2SI64rr_Int:
5669 case X86::VCVTSS2SIZrr_Int: case X86::VCVTSS2SI64Zrr_Int:
5670 case X86::CVTTSS2SIrr_Int: case X86::CVTTSS2SI64rr_Int:
5671 case X86::VCVTTSS2SIrr_Int: case X86::VCVTTSS2SI64rr_Int:
5672 case X86::VCVTTSS2SIZrr_Int: case X86::VCVTTSS2SI64Zrr_Int:
5673 case X86::VCVTSS2USIZrr_Int: case X86::VCVTSS2USI64Zrr_Int:
5674 case X86::VCVTTSS2USIZrr_Int: case X86::VCVTTSS2USI64Zrr_Int:
5675 case X86::RCPSSr_Int: case X86::VRCPSSr_Int:
5676 case X86::RSQRTSSr_Int: case X86::VRSQRTSSr_Int:
5677 case X86::ROUNDSSr_Int: case X86::VROUNDSSr_Int:
5678 case X86::COMISSrr_Int: case X86::VCOMISSrr_Int: case X86::VCOMISSZrr_Int:
5679 case X86::UCOMISSrr_Int:case X86::VUCOMISSrr_Int:case X86::VUCOMISSZrr_Int:
5680 case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int:
5681 case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int:
5682 case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int:
5683 case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int:
5684 case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int:
5685 case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int:
5686 case X86::SQRTSSr_Int: case X86::VSQRTSSr_Int: case X86::VSQRTSSZr_Int:
5687 case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int:
5688 case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz:
5689 case X86::VCMPSSZrr_Intk:
5690 case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz:
5691 case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz:
5692 case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz:
5693 case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz:
5694 case X86::VSQRTSSZr_Intk: case X86::VSQRTSSZr_Intkz:
5695 case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz:
5696 case X86::VFMADDSS4rr_Int: case X86::VFNMADDSS4rr_Int:
5697 case X86::VFMSUBSS4rr_Int: case X86::VFNMSUBSS4rr_Int:
5698 case X86::VFMADD132SSr_Int: case X86::VFNMADD132SSr_Int:
5699 case X86::VFMADD213SSr_Int: case X86::VFNMADD213SSr_Int:
5700 case X86::VFMADD231SSr_Int: case X86::VFNMADD231SSr_Int:
5701 case X86::VFMSUB132SSr_Int: case X86::VFNMSUB132SSr_Int:
5702 case X86::VFMSUB213SSr_Int: case X86::VFNMSUB213SSr_Int:
5703 case X86::VFMSUB231SSr_Int: case X86::VFNMSUB231SSr_Int:
5704 case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int:
5705 case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int:
5706 case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int:
5707 case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int:
5708 case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int:
5709 case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int:
5710 case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk:
5711 case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk:
5712 case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk:
5713 case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk:
5714 case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk:
5715 case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk:
5716 case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz:
5717 case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz:
5718 case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz:
5719 case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz:
5720 case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz:
5721 case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz:
5722 case X86::VFIXUPIMMSSZrri:
5723 case X86::VFIXUPIMMSSZrrik:
5724 case X86::VFIXUPIMMSSZrrikz:
5725 case X86::VFPCLASSSSZrr:
5726 case X86::VFPCLASSSSZrrk:
5727 case X86::VGETEXPSSZr:
5728 case X86::VGETEXPSSZrk:
5729 case X86::VGETEXPSSZrkz:
5730 case X86::VGETMANTSSZrri:
5731 case X86::VGETMANTSSZrrik:
5732 case X86::VGETMANTSSZrrikz:
5733 case X86::VRANGESSZrri:
5734 case X86::VRANGESSZrrik:
5735 case X86::VRANGESSZrrikz:
5736 case X86::VRCP14SSZrr:
5737 case X86::VRCP14SSZrrk:
5738 case X86::VRCP14SSZrrkz:
5739 case X86::VRCP28SSZr:
5740 case X86::VRCP28SSZrk:
5741 case X86::VRCP28SSZrkz:
5742 case X86::VREDUCESSZrri:
5743 case X86::VREDUCESSZrrik:
5744 case X86::VREDUCESSZrrikz:
5745 case X86::VRNDSCALESSZr_Int:
5746 case X86::VRNDSCALESSZr_Intk:
5747 case X86::VRNDSCALESSZr_Intkz:
5748 case X86::VRSQRT14SSZrr:
5749 case X86::VRSQRT14SSZrrk:
5750 case X86::VRSQRT14SSZrrkz:
5751 case X86::VRSQRT28SSZr:
5752 case X86::VRSQRT28SSZrk:
5753 case X86::VRSQRT28SSZrkz:
5754 case X86::VSCALEFSSZrr:
5755 case X86::VSCALEFSSZrrk:
5756 case X86::VSCALEFSSZrrkz:
5757 return false;
5758 default:
5759 return true;
5760 }
5761 }
5762
5763 if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm ||
5764 Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt ||
5765 Opc == X86::VMOVSDZrm_alt) &&
5766 RegSize > 64) {
5767 // These instructions only load 64 bits, so we can't fold them if the
5768 // destination register is wider than 64 bits (8 bytes) and the user
5769 // instruction isn't scalar (SD).
5770 switch (UserOpc) {
5771 case X86::CVTSD2SSrr_Int:
5772 case X86::VCVTSD2SSrr_Int:
5773 case X86::VCVTSD2SSZrr_Int:
5774 case X86::VCVTSD2SSZrr_Intk:
5775 case X86::VCVTSD2SSZrr_Intkz:
5776 case X86::CVTSD2SIrr_Int: case X86::CVTSD2SI64rr_Int:
5777 case X86::VCVTSD2SIrr_Int: case X86::VCVTSD2SI64rr_Int:
5778 case X86::VCVTSD2SIZrr_Int: case X86::VCVTSD2SI64Zrr_Int:
5779 case X86::CVTTSD2SIrr_Int: case X86::CVTTSD2SI64rr_Int:
5780 case X86::VCVTTSD2SIrr_Int: case X86::VCVTTSD2SI64rr_Int:
5781 case X86::VCVTTSD2SIZrr_Int: case X86::VCVTTSD2SI64Zrr_Int:
5782 case X86::VCVTSD2USIZrr_Int: case X86::VCVTSD2USI64Zrr_Int:
5783 case X86::VCVTTSD2USIZrr_Int: case X86::VCVTTSD2USI64Zrr_Int:
5784 case X86::ROUNDSDr_Int: case X86::VROUNDSDr_Int:
5785 case X86::COMISDrr_Int: case X86::VCOMISDrr_Int: case X86::VCOMISDZrr_Int:
5786 case X86::UCOMISDrr_Int:case X86::VUCOMISDrr_Int:case X86::VUCOMISDZrr_Int:
5787 case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int:
5788 case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int:
5789 case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int:
5790 case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int:
5791 case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int:
5792 case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int:
5793 case X86::SQRTSDr_Int: case X86::VSQRTSDr_Int: case X86::VSQRTSDZr_Int:
5794 case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int:
5795 case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz:
5796 case X86::VCMPSDZrr_Intk:
5797 case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz:
5798 case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz:
5799 case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz:
5800 case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz:
5801 case X86::VSQRTSDZr_Intk: case X86::VSQRTSDZr_Intkz:
5802 case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz:
5803 case X86::VFMADDSD4rr_Int: case X86::VFNMADDSD4rr_Int:
5804 case X86::VFMSUBSD4rr_Int: case X86::VFNMSUBSD4rr_Int:
5805 case X86::VFMADD132SDr_Int: case X86::VFNMADD132SDr_Int:
5806 case X86::VFMADD213SDr_Int: case X86::VFNMADD213SDr_Int:
5807 case X86::VFMADD231SDr_Int: case X86::VFNMADD231SDr_Int:
5808 case X86::VFMSUB132SDr_Int: case X86::VFNMSUB132SDr_Int:
5809 case X86::VFMSUB213SDr_Int: case X86::VFNMSUB213SDr_Int:
5810 case X86::VFMSUB231SDr_Int: case X86::VFNMSUB231SDr_Int:
5811 case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int:
5812 case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int:
5813 case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int:
5814 case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int:
5815 case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int:
5816 case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int:
5817 case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk:
5818 case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk:
5819 case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk:
5820 case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk:
5821 case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk:
5822 case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk:
5823 case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz:
5824 case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz:
5825 case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz:
5826 case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz:
5827 case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz:
5828 case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz:
5829 case X86::VFIXUPIMMSDZrri:
5830 case X86::VFIXUPIMMSDZrrik:
5831 case X86::VFIXUPIMMSDZrrikz:
5832 case X86::VFPCLASSSDZrr:
5833 case X86::VFPCLASSSDZrrk:
5834 case X86::VGETEXPSDZr:
5835 case X86::VGETEXPSDZrk:
5836 case X86::VGETEXPSDZrkz:
5837 case X86::VGETMANTSDZrri:
5838 case X86::VGETMANTSDZrrik:
5839 case X86::VGETMANTSDZrrikz:
5840 case X86::VRANGESDZrri:
5841 case X86::VRANGESDZrrik:
5842 case X86::VRANGESDZrrikz:
5843 case X86::VRCP14SDZrr:
5844 case X86::VRCP14SDZrrk:
5845 case X86::VRCP14SDZrrkz:
5846 case X86::VRCP28SDZr:
5847 case X86::VRCP28SDZrk:
5848 case X86::VRCP28SDZrkz:
5849 case X86::VREDUCESDZrri:
5850 case X86::VREDUCESDZrrik:
5851 case X86::VREDUCESDZrrikz:
5852 case X86::VRNDSCALESDZr_Int:
5853 case X86::VRNDSCALESDZr_Intk:
5854 case X86::VRNDSCALESDZr_Intkz:
5855 case X86::VRSQRT14SDZrr:
5856 case X86::VRSQRT14SDZrrk:
5857 case X86::VRSQRT14SDZrrkz:
5858 case X86::VRSQRT28SDZr:
5859 case X86::VRSQRT28SDZrk:
5860 case X86::VRSQRT28SDZrkz:
5861 case X86::VSCALEFSDZrr:
5862 case X86::VSCALEFSDZrrk:
5863 case X86::VSCALEFSDZrrkz:
5864 return false;
5865 default:
5866 return true;
5867 }
5868 }
5869
5870 return false;
5871 }
5872
5873 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
5874 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
5875 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
5876 LiveIntervals *LIS) const {
5877
5878 // TODO: Support the case where LoadMI loads a wide register, but MI
5879 // only uses a subreg.
5880 for (auto Op : Ops) {
5881 if (MI.getOperand(Op).getSubReg())
5882 return nullptr;
5883 }
5884
5885 // If loading from a FrameIndex, fold directly from the FrameIndex.
5886 unsigned NumOps = LoadMI.getDesc().getNumOperands();
5887 int FrameIndex;
5888 if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
5889 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
5890 return nullptr;
5891 return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
5892 }
5893
5894 // Check switch flag
5895 if (NoFusing) return nullptr;
5896
5897 // Avoid partial and undef register update stalls unless optimizing for size.
5898 if (!MF.getFunction().hasOptSize() &&
5899 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5900 shouldPreventUndefRegUpdateMemFold(MF, MI)))
5901 return nullptr;
5902
5903 // Determine the alignment of the load.
5904 Align Alignment;
5905 if (LoadMI.hasOneMemOperand())
5906 Alignment = (*LoadMI.memoperands_begin())->getAlign();
5907 else
5908 switch (LoadMI.getOpcode()) {
5909 case X86::AVX512_512_SET0:
5910 case X86::AVX512_512_SETALLONES:
5911 Alignment = Align(64);
5912 break;
5913 case X86::AVX2_SETALLONES:
5914 case X86::AVX1_SETALLONES:
5915 case X86::AVX_SET0:
5916 case X86::AVX512_256_SET0:
5917 Alignment = Align(32);
5918 break;
5919 case X86::V_SET0:
5920 case X86::V_SETALLONES:
5921 case X86::AVX512_128_SET0:
5922 case X86::FsFLD0F128:
5923 case X86::AVX512_FsFLD0F128:
5924 Alignment = Align(16);
5925 break;
5926 case X86::MMX_SET0:
5927 case X86::FsFLD0SD:
5928 case X86::AVX512_FsFLD0SD:
5929 Alignment = Align(8);
5930 break;
5931 case X86::FsFLD0SS:
5932 case X86::AVX512_FsFLD0SS:
5933 Alignment = Align(4);
5934 break;
5935 default:
5936 return nullptr;
5937 }
5938 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
5939 unsigned NewOpc = 0;
5940 switch (MI.getOpcode()) {
5941 default: return nullptr;
5942 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
5943 case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
5944 case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
5945 case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
5946 }
5947 // Change to CMPXXri r, 0 first.
5948 MI.setDesc(get(NewOpc));
5949 MI.getOperand(1).ChangeToImmediate(0);
5950 } else if (Ops.size() != 1)
5951 return nullptr;
5952
5953 // Make sure the subregisters match.
5954 // Otherwise we risk changing the size of the load.
5955 if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
5956 return nullptr;
5957
5958 SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
5959 switch (LoadMI.getOpcode()) {
5960 case X86::MMX_SET0:
5961 case X86::V_SET0:
5962 case X86::V_SETALLONES:
5963 case X86::AVX2_SETALLONES:
5964 case X86::AVX1_SETALLONES:
5965 case X86::AVX_SET0:
5966 case X86::AVX512_128_SET0:
5967 case X86::AVX512_256_SET0:
5968 case X86::AVX512_512_SET0:
5969 case X86::AVX512_512_SETALLONES:
5970 case X86::FsFLD0SD:
5971 case X86::AVX512_FsFLD0SD:
5972 case X86::FsFLD0SS:
5973 case X86::AVX512_FsFLD0SS:
5974 case X86::FsFLD0F128:
5975 case X86::AVX512_FsFLD0F128: {
5976 // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
5977 // Create a constant-pool entry and operands to load from it.
5978
5979 // Medium and large mode can't fold loads this way.
5980 if (MF.getTarget().getCodeModel() != CodeModel::Small &&
5981 MF.getTarget().getCodeModel() != CodeModel::Kernel)
5982 return nullptr;
5983
5984 // x86-32 PIC requires a PIC base register for constant pools.
5985 unsigned PICBase = 0;
5986 if (MF.getTarget().isPositionIndependent()) {
5987 if (Subtarget.is64Bit())
5988 PICBase = X86::RIP;
5989 else
5990 // FIXME: PICBase = getGlobalBaseReg(&MF);
5991 // This doesn't work for several reasons.
5992 // 1. GlobalBaseReg may have been spilled.
5993 // 2. It may not be live at MI.
5994 return nullptr;
5995 }
5996
5997 // Create a constant-pool entry.
5998 MachineConstantPool &MCP = *MF.getConstantPool();
5999 Type *Ty;
6000 unsigned Opc = LoadMI.getOpcode();
6001 if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
6002 Ty = Type::getFloatTy(MF.getFunction().getContext());
6003 else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
6004 Ty = Type::getDoubleTy(MF.getFunction().getContext());
6005 else if (Opc == X86::FsFLD0F128 || Opc == X86::AVX512_FsFLD0F128)
6006 Ty = Type::getFP128Ty(MF.getFunction().getContext());
6007 else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
6008 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6009 16);
6010 else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
6011 Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
6012 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6013 8);
6014 else if (Opc == X86::MMX_SET0)
6015 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6016 2);
6017 else
6018 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6019 4);
6020
6021 bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
6022 Opc == X86::AVX512_512_SETALLONES ||
6023 Opc == X86::AVX1_SETALLONES);
6024 const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
6025 Constant::getNullValue(Ty);
6026 unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
6027
6028 // Create operands to load from the constant pool entry.
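// An X86 memory reference is [Base + Scale*Index + Disp] plus a segment;
// here that is Base = PICBase (RIP or none), Scale = 1, Index = no register,
// Disp = the constant-pool index, Segment = no register.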
6029 MOs.push_back(MachineOperand::CreateReg(PICBase, false));
6030 MOs.push_back(MachineOperand::CreateImm(1));
6031 MOs.push_back(MachineOperand::CreateReg(0, false));
6032 MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
6033 MOs.push_back(MachineOperand::CreateReg(0, false));
6034 break;
6035 }
6036 default: {
6037 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
6038 return nullptr;
6039
6040 // Folding a normal load. Just copy the load's address operands.
6041 MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
6042 LoadMI.operands_begin() + NumOps);
6043 break;
6044 }
6045 }
6046 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
6047 /*Size=*/0, Alignment, /*AllowCommute=*/true);
6048 }
6049
6050 static SmallVector<MachineMemOperand *, 2>
6051 extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
6052 SmallVector<MachineMemOperand *, 2> LoadMMOs;
6053
6054 for (MachineMemOperand *MMO : MMOs) {
6055 if (!MMO->isLoad())
6056 continue;
6057
6058 if (!MMO->isStore()) {
6059 // Reuse the MMO.
6060 LoadMMOs.push_back(MMO);
6061 } else {
6062 // Clone the MMO and unset the store flag.
6063 LoadMMOs.push_back(MF.getMachineMemOperand(
6064 MMO, MMO->getFlags() & ~MachineMemOperand::MOStore));
6065 }
6066 }
6067
6068 return LoadMMOs;
6069 }
6070
6071 static SmallVector<MachineMemOperand *, 2>
6072 extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
6073 SmallVector<MachineMemOperand *, 2> StoreMMOs;
6074
6075 for (MachineMemOperand *MMO : MMOs) {
6076 if (!MMO->isStore())
6077 continue;
6078
6079 if (!MMO->isLoad()) {
6080 // Reuse the MMO.
6081 StoreMMOs.push_back(MMO);
6082 } else {
6083 // Clone the MMO and unset the load flag.
6084 StoreMMOs.push_back(MF.getMachineMemOperand(
6085 MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad));
6086 }
6087 }
6088
6089 return StoreMMOs;
6090 }
6091
6092 static unsigned getBroadcastOpcode(const X86MemoryFoldTableEntry *I,
6093 const TargetRegisterClass *RC,
6094 const X86Subtarget &STI) {
6095 assert(STI.hasAVX512() && "Expected at least AVX512!");
6096 unsigned SpillSize = STI.getRegisterInfo()->getSpillSize(*RC);
6097 assert((SpillSize == 64 || STI.hasVLX()) &&
6098 "Can't broadcast less than 64 bytes without AVX512VL!");
6099
6100 switch (I->Flags & TB_BCAST_MASK) {
6101 default: llvm_unreachable("Unexpected broadcast type!");
6102 case TB_BCAST_D:
6103 switch (SpillSize) {
6104 default: llvm_unreachable("Unknown spill size");
6105 case 16: return X86::VPBROADCASTDZ128rm;
6106 case 32: return X86::VPBROADCASTDZ256rm;
6107 case 64: return X86::VPBROADCASTDZrm;
6108 }
6109 break;
6110 case TB_BCAST_Q:
6111 switch (SpillSize) {
6112 default: llvm_unreachable("Unknown spill size");
6113 case 16: return X86::VPBROADCASTQZ128rm;
6114 case 32: return X86::VPBROADCASTQZ256rm;
6115 case 64: return X86::VPBROADCASTQZrm;
6116 }
6117 break;
6118 case TB_BCAST_SS:
6119 switch (SpillSize) {
6120 default: llvm_unreachable("Unknown spill size");
6121 case 16: return X86::VBROADCASTSSZ128rm;
6122 case 32: return X86::VBROADCASTSSZ256rm;
6123 case 64: return X86::VBROADCASTSSZrm;
6124 }
6125 break;
6126 case TB_BCAST_SD:
6127 switch (SpillSize) {
6128 default: llvm_unreachable("Unknown spill size");
6129 case 16: return X86::VMOVDDUPZ128rm;
6130 case 32: return X86::VBROADCASTSDZ256rm;
6131 case 64: return X86::VBROADCASTSDZrm;
6132 }
6133 break;
6134 }
6135 }
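// For example, an unfold-table entry tagged TB_BCAST_SD with a 32-byte spill
// size maps to X86::VBROADCASTSDZ256rm; the 16-byte case uses VMOVDDUPZ128rm
// because there is no 128-bit VBROADCASTSD form.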
6136
6137 bool X86InstrInfo::unfoldMemoryOperand(
6138 MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
6139 bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
6140 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode());
6141 if (I == nullptr)
6142 return false;
6143 unsigned Opc = I->DstOp;
6144 unsigned Index = I->Flags & TB_INDEX_MASK;
6145 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6146 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6147 bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
6148 if (UnfoldLoad && !FoldedLoad)
6149 return false;
6150 UnfoldLoad &= FoldedLoad;
6151 if (UnfoldStore && !FoldedStore)
6152 return false;
6153 UnfoldStore &= FoldedStore;
6154
6155 const MCInstrDesc &MCID = get(Opc);
6156
6157 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
6158 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6159 // TODO: Check if 32-byte or greater accesses are slow too?
6160 if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
6161 Subtarget.isUnalignedMem16Slow())
6162 // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
6163 // conservatively assume the address is unaligned. That's bad for
6164 // performance.
6165 return false;
6166 SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
6167 SmallVector<MachineOperand,2> BeforeOps;
6168 SmallVector<MachineOperand,2> AfterOps;
6169 SmallVector<MachineOperand,4> ImpOps;
6170 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
6171 MachineOperand &Op = MI.getOperand(i);
6172 if (i >= Index && i < Index + X86::AddrNumOperands)
6173 AddrOps.push_back(Op);
6174 else if (Op.isReg() && Op.isImplicit())
6175 ImpOps.push_back(Op);
6176 else if (i < Index)
6177 BeforeOps.push_back(Op);
6178 else if (i > Index)
6179 AfterOps.push_back(Op);
6180 }
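// e.g. unfolding ADD32rm (dst, src1, base, scale, index, disp, segment) with
// Index == 2 puts {dst, src1} in BeforeOps, the five memory operands in
// AddrOps, nothing in AfterOps, and the implicit EFLAGS def in ImpOps.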
6181
6182 // Emit the load or broadcast instruction.
6183 if (UnfoldLoad) {
6184 auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
6185
6186 unsigned Opc;
6187 if (FoldedBCast) {
6188 Opc = getBroadcastOpcode(I, RC, Subtarget);
6189 } else {
6190 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6191 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6192 Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget);
6193 }
6194
6195 DebugLoc DL;
6196 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), Reg);
6197 for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
6198 MIB.add(AddrOps[i]);
6199 MIB.setMemRefs(MMOs);
6200 NewMIs.push_back(MIB);
6201
6202 if (UnfoldStore) {
6203 // Address operands cannot be marked isKill.
6204 for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
6205 MachineOperand &MO = NewMIs[0]->getOperand(i);
6206 if (MO.isReg())
6207 MO.setIsKill(false);
6208 }
6209 }
6210 }
6211
6212 // Emit the data processing instruction.
6213 MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
6214 MachineInstrBuilder MIB(MF, DataMI);
6215
6216 if (FoldedStore)
6217 MIB.addReg(Reg, RegState::Define);
6218 for (MachineOperand &BeforeOp : BeforeOps)
6219 MIB.add(BeforeOp);
6220 if (FoldedLoad)
6221 MIB.addReg(Reg);
6222 for (MachineOperand &AfterOp : AfterOps)
6223 MIB.add(AfterOp);
6224 for (MachineOperand &ImpOp : ImpOps) {
6225 MIB.addReg(ImpOp.getReg(),
6226 getDefRegState(ImpOp.isDef()) |
6227 RegState::Implicit |
6228 getKillRegState(ImpOp.isKill()) |
6229 getDeadRegState(ImpOp.isDead()) |
6230 getUndefRegState(ImpOp.isUndef()));
6231 }
6232 // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
6233 switch (DataMI->getOpcode()) {
6234 default: break;
6235 case X86::CMP64ri32:
6236 case X86::CMP64ri8:
6237 case X86::CMP32ri:
6238 case X86::CMP32ri8:
6239 case X86::CMP16ri:
6240 case X86::CMP16ri8:
6241 case X86::CMP8ri: {
6242 MachineOperand &MO0 = DataMI->getOperand(0);
6243 MachineOperand &MO1 = DataMI->getOperand(1);
6244 if (MO1.getImm() == 0) {
6245 unsigned NewOpc;
6246 switch (DataMI->getOpcode()) {
6247 default: llvm_unreachable("Unreachable!");
6248 case X86::CMP64ri8:
6249 case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
6250 case X86::CMP32ri8:
6251 case X86::CMP32ri: NewOpc = X86::TEST32rr; break;
6252 case X86::CMP16ri8:
6253 case X86::CMP16ri: NewOpc = X86::TEST16rr; break;
6254 case X86::CMP8ri: NewOpc = X86::TEST8rr; break;
6255 }
6256 DataMI->setDesc(get(NewOpc));
6257 MO1.ChangeToRegister(MO0.getReg(), false);
6258 }
6259 }
6260 }
6261 NewMIs.push_back(DataMI);
6262
6263 // Emit the store instruction.
6264 if (UnfoldStore) {
6265 const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
6266 auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
6267 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
6268 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6269 unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget);
6270 DebugLoc DL;
6271 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
6272 for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
6273 MIB.add(AddrOps[i]);
6274 MIB.addReg(Reg, RegState::Kill);
6275 MIB.setMemRefs(MMOs);
6276 NewMIs.push_back(MIB);
6277 }
6278
6279 return true;
6280 }
6281
6282 bool
6283 X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
6284 SmallVectorImpl<SDNode*> &NewNodes) const {
6285 if (!N->isMachineOpcode())
6286 return false;
6287
6288 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode());
6289 if (I == nullptr)
6290 return false;
6291 unsigned Opc = I->DstOp;
6292 unsigned Index = I->Flags & TB_INDEX_MASK;
6293 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6294 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6295 bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
6296 const MCInstrDesc &MCID = get(Opc);
6297 MachineFunction &MF = DAG.getMachineFunction();
6298 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6299 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
6300 unsigned NumDefs = MCID.NumDefs;
6301 std::vector<SDValue> AddrOps;
6302 std::vector<SDValue> BeforeOps;
6303 std::vector<SDValue> AfterOps;
6304 SDLoc dl(N);
6305 unsigned NumOps = N->getNumOperands();
6306 for (unsigned i = 0; i != NumOps-1; ++i) {
6307 SDValue Op = N->getOperand(i);
6308 if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
6309 AddrOps.push_back(Op);
6310 else if (i < Index-NumDefs)
6311 BeforeOps.push_back(Op);
6312 else if (i > Index-NumDefs)
6313 AfterOps.push_back(Op);
6314 }
6315 SDValue Chain = N->getOperand(NumOps-1);
6316 AddrOps.push_back(Chain);
6317
6318 // Emit the load instruction.
6319 SDNode *Load = nullptr;
6320 if (FoldedLoad) {
6321 EVT VT = *TRI.legalclasstypes_begin(*RC);
6322 auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
6323 if (MMOs.empty() && RC == &X86::VR128RegClass &&
6324 Subtarget.isUnalignedMem16Slow())
6325 // Do not introduce a slow unaligned load.
6326 return false;
6327 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
6328 // memory access is slow above.
6329
6330 unsigned Opc;
6331 if (FoldedBCast) {
6332 Opc = getBroadcastOpcode(I, RC, Subtarget);
6333 } else {
6334 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6335 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6336 Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget);
6337 }
6338
6339 Load = DAG.getMachineNode(Opc, dl, VT, MVT::Other, AddrOps);
6340 NewNodes.push_back(Load);
6341
6342 // Preserve memory reference information.
6343 DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs);
6344 }
6345
6346 // Emit the data processing instruction.
6347 std::vector<EVT> VTs;
6348 const TargetRegisterClass *DstRC = nullptr;
6349 if (MCID.getNumDefs() > 0) {
6350 DstRC = getRegClass(MCID, 0, &RI, MF);
6351 VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
6352 }
6353 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
6354 EVT VT = N->getValueType(i);
6355 if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
6356 VTs.push_back(VT);
6357 }
6358 if (Load)
6359 BeforeOps.push_back(SDValue(Load, 0));
6360 BeforeOps.insert(BeforeOps.end(), AfterOps.begin(), AfterOps.end());
6361 // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
6362 switch (Opc) {
6363 default: break;
6364 case X86::CMP64ri32:
6365 case X86::CMP64ri8:
6366 case X86::CMP32ri:
6367 case X86::CMP32ri8:
6368 case X86::CMP16ri:
6369 case X86::CMP16ri8:
6370 case X86::CMP8ri:
6371 if (isNullConstant(BeforeOps[1])) {
6372 switch (Opc) {
6373 default: llvm_unreachable("Unreachable!");
6374 case X86::CMP64ri8:
6375 case X86::CMP64ri32: Opc = X86::TEST64rr; break;
6376 case X86::CMP32ri8:
6377 case X86::CMP32ri: Opc = X86::TEST32rr; break;
6378 case X86::CMP16ri8:
6379 case X86::CMP16ri: Opc = X86::TEST16rr; break;
6380 case X86::CMP8ri: Opc = X86::TEST8rr; break;
6381 }
6382 BeforeOps[1] = BeforeOps[0];
6383 }
6384 }
6385 SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
6386 NewNodes.push_back(NewNode);
6387
6388 // Emit the store instruction.
6389 if (FoldedStore) {
6390 AddrOps.pop_back();
6391 AddrOps.push_back(SDValue(NewNode, 0));
6392 AddrOps.push_back(Chain);
6393 auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
6394 if (MMOs.empty() && RC == &X86::VR128RegClass &&
6395 Subtarget.isUnalignedMem16Slow())
6396 // Do not introduce a slow unaligned store.
6397 return false;
6398 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
6399 // memory access is slow above.
6400 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6401 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6402 SDNode *Store =
6403 DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
6404 dl, MVT::Other, AddrOps);
6405 NewNodes.push_back(Store);
6406
6407 // Preserve memory reference information.
6408 DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs);
6409 }
6410
6411 return true;
6412 }
6413
6414 unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
6415 bool UnfoldLoad, bool UnfoldStore,
6416 unsigned *LoadRegIndex) const {
6417 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc);
6418 if (I == nullptr)
6419 return 0;
6420 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6421 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6422 if (UnfoldLoad && !FoldedLoad)
6423 return 0;
6424 if (UnfoldStore && !FoldedStore)
6425 return 0;
6426 if (LoadRegIndex)
6427 *LoadRegIndex = I->Flags & TB_INDEX_MASK;
6428 return I->DstOp;
6429 }
6430
6431 bool
6432 X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
6433 int64_t &Offset1, int64_t &Offset2) const {
6434 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
6435 return false;
6436 unsigned Opc1 = Load1->getMachineOpcode();
6437 unsigned Opc2 = Load2->getMachineOpcode();
6438 switch (Opc1) {
6439 default: return false;
6440 case X86::MOV8rm:
6441 case X86::MOV16rm:
6442 case X86::MOV32rm:
6443 case X86::MOV64rm:
6444 case X86::LD_Fp32m:
6445 case X86::LD_Fp64m:
6446 case X86::LD_Fp80m:
6447 case X86::MOVSSrm:
6448 case X86::MOVSSrm_alt:
6449 case X86::MOVSDrm:
6450 case X86::MOVSDrm_alt:
6451 case X86::MMX_MOVD64rm:
6452 case X86::MMX_MOVQ64rm:
6453 case X86::MOVAPSrm:
6454 case X86::MOVUPSrm:
6455 case X86::MOVAPDrm:
6456 case X86::MOVUPDrm:
6457 case X86::MOVDQArm:
6458 case X86::MOVDQUrm:
6459 // AVX load instructions
6460 case X86::VMOVSSrm:
6461 case X86::VMOVSSrm_alt:
6462 case X86::VMOVSDrm:
6463 case X86::VMOVSDrm_alt:
6464 case X86::VMOVAPSrm:
6465 case X86::VMOVUPSrm:
6466 case X86::VMOVAPDrm:
6467 case X86::VMOVUPDrm:
6468 case X86::VMOVDQArm:
6469 case X86::VMOVDQUrm:
6470 case X86::VMOVAPSYrm:
6471 case X86::VMOVUPSYrm:
6472 case X86::VMOVAPDYrm:
6473 case X86::VMOVUPDYrm:
6474 case X86::VMOVDQAYrm:
6475 case X86::VMOVDQUYrm:
6476 // AVX512 load instructions
6477 case X86::VMOVSSZrm:
6478 case X86::VMOVSSZrm_alt:
6479 case X86::VMOVSDZrm:
6480 case X86::VMOVSDZrm_alt:
6481 case X86::VMOVAPSZ128rm:
6482 case X86::VMOVUPSZ128rm:
6483 case X86::VMOVAPSZ128rm_NOVLX:
6484 case X86::VMOVUPSZ128rm_NOVLX:
6485 case X86::VMOVAPDZ128rm:
6486 case X86::VMOVUPDZ128rm:
6487 case X86::VMOVDQU8Z128rm:
6488 case X86::VMOVDQU16Z128rm:
6489 case X86::VMOVDQA32Z128rm:
6490 case X86::VMOVDQU32Z128rm:
6491 case X86::VMOVDQA64Z128rm:
6492 case X86::VMOVDQU64Z128rm:
6493 case X86::VMOVAPSZ256rm:
6494 case X86::VMOVUPSZ256rm:
6495 case X86::VMOVAPSZ256rm_NOVLX:
6496 case X86::VMOVUPSZ256rm_NOVLX:
6497 case X86::VMOVAPDZ256rm:
6498 case X86::VMOVUPDZ256rm:
6499 case X86::VMOVDQU8Z256rm:
6500 case X86::VMOVDQU16Z256rm:
6501 case X86::VMOVDQA32Z256rm:
6502 case X86::VMOVDQU32Z256rm:
6503 case X86::VMOVDQA64Z256rm:
6504 case X86::VMOVDQU64Z256rm:
6505 case X86::VMOVAPSZrm:
6506 case X86::VMOVUPSZrm:
6507 case X86::VMOVAPDZrm:
6508 case X86::VMOVUPDZrm:
6509 case X86::VMOVDQU8Zrm:
6510 case X86::VMOVDQU16Zrm:
6511 case X86::VMOVDQA32Zrm:
6512 case X86::VMOVDQU32Zrm:
6513 case X86::VMOVDQA64Zrm:
6514 case X86::VMOVDQU64Zrm:
6515 case X86::KMOVBkm:
6516 case X86::KMOVWkm:
6517 case X86::KMOVDkm:
6518 case X86::KMOVQkm:
6519 break;
6520 }
6521 switch (Opc2) {
6522 default: return false;
6523 case X86::MOV8rm:
6524 case X86::MOV16rm:
6525 case X86::MOV32rm:
6526 case X86::MOV64rm:
6527 case X86::LD_Fp32m:
6528 case X86::LD_Fp64m:
6529 case X86::LD_Fp80m:
6530 case X86::MOVSSrm:
6531 case X86::MOVSSrm_alt:
6532 case X86::MOVSDrm:
6533 case X86::MOVSDrm_alt:
6534 case X86::MMX_MOVD64rm:
6535 case X86::MMX_MOVQ64rm:
6536 case X86::MOVAPSrm:
6537 case X86::MOVUPSrm:
6538 case X86::MOVAPDrm:
6539 case X86::MOVUPDrm:
6540 case X86::MOVDQArm:
6541 case X86::MOVDQUrm:
6542 // AVX load instructions
6543 case X86::VMOVSSrm:
6544 case X86::VMOVSSrm_alt:
6545 case X86::VMOVSDrm:
6546 case X86::VMOVSDrm_alt:
6547 case X86::VMOVAPSrm:
6548 case X86::VMOVUPSrm:
6549 case X86::VMOVAPDrm:
6550 case X86::VMOVUPDrm:
6551 case X86::VMOVDQArm:
6552 case X86::VMOVDQUrm:
6553 case X86::VMOVAPSYrm:
6554 case X86::VMOVUPSYrm:
6555 case X86::VMOVAPDYrm:
6556 case X86::VMOVUPDYrm:
6557 case X86::VMOVDQAYrm:
6558 case X86::VMOVDQUYrm:
6559 // AVX512 load instructions
6560 case X86::VMOVSSZrm:
6561 case X86::VMOVSSZrm_alt:
6562 case X86::VMOVSDZrm:
6563 case X86::VMOVSDZrm_alt:
6564 case X86::VMOVAPSZ128rm:
6565 case X86::VMOVUPSZ128rm:
6566 case X86::VMOVAPSZ128rm_NOVLX:
6567 case X86::VMOVUPSZ128rm_NOVLX:
6568 case X86::VMOVAPDZ128rm:
6569 case X86::VMOVUPDZ128rm:
6570 case X86::VMOVDQU8Z128rm:
6571 case X86::VMOVDQU16Z128rm:
6572 case X86::VMOVDQA32Z128rm:
6573 case X86::VMOVDQU32Z128rm:
6574 case X86::VMOVDQA64Z128rm:
6575 case X86::VMOVDQU64Z128rm:
6576 case X86::VMOVAPSZ256rm:
6577 case X86::VMOVUPSZ256rm:
6578 case X86::VMOVAPSZ256rm_NOVLX:
6579 case X86::VMOVUPSZ256rm_NOVLX:
6580 case X86::VMOVAPDZ256rm:
6581 case X86::VMOVUPDZ256rm:
6582 case X86::VMOVDQU8Z256rm:
6583 case X86::VMOVDQU16Z256rm:
6584 case X86::VMOVDQA32Z256rm:
6585 case X86::VMOVDQU32Z256rm:
6586 case X86::VMOVDQA64Z256rm:
6587 case X86::VMOVDQU64Z256rm:
6588 case X86::VMOVAPSZrm:
6589 case X86::VMOVUPSZrm:
6590 case X86::VMOVAPDZrm:
6591 case X86::VMOVUPDZrm:
6592 case X86::VMOVDQU8Zrm:
6593 case X86::VMOVDQU16Zrm:
6594 case X86::VMOVDQA32Zrm:
6595 case X86::VMOVDQU32Zrm:
6596 case X86::VMOVDQA64Zrm:
6597 case X86::VMOVDQU64Zrm:
6598 case X86::KMOVBkm:
6599 case X86::KMOVWkm:
6600 case X86::KMOVDkm:
6601 case X86::KMOVQkm:
6602 break;
6603 }
6604
6605 // Lambda to check whether both loads have the same value at a given operand index.
6606 auto HasSameOp = [&](int I) {
6607 return Load1->getOperand(I) == Load2->getOperand(I);
6608 };
6609
6610 // All operands except the displacement should match.
6611 if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) ||
6612 !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg))
6613 return false;
6614
6615 // Chain Operand must be the same.
6616 if (!HasSameOp(5))
6617 return false;
6618
6619 // Now let's examine if the displacements are constants.
6620 auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp));
6621 auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp));
6622 if (!Disp1 || !Disp2)
6623 return false;
6624
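// e.g. two MOV32rm nodes off the same chain reading [%rdi + 4] and
// [%rdi + 12] agree on all four address components and report Offset1 = 4,
// Offset2 = 12.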
6625 Offset1 = Disp1->getSExtValue();
6626 Offset2 = Disp2->getSExtValue();
6627 return true;
6628 }
6629
6630 bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
6631 int64_t Offset1, int64_t Offset2,
6632 unsigned NumLoads) const {
6633 assert(Offset2 > Offset1);
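// Only cluster loads whose displacements are within roughly 512 bytes of
// each other; (Offset2 - Offset1) / 8 > 64 rejects anything farther apart.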
6634 if ((Offset2 - Offset1) / 8 > 64)
6635 return false;
6636
6637 unsigned Opc1 = Load1->getMachineOpcode();
6638 unsigned Opc2 = Load2->getMachineOpcode();
6639 if (Opc1 != Opc2)
6640 return false; // FIXME: overly conservative?
6641
6642 switch (Opc1) {
6643 default: break;
6644 case X86::LD_Fp32m:
6645 case X86::LD_Fp64m:
6646 case X86::LD_Fp80m:
6647 case X86::MMX_MOVD64rm:
6648 case X86::MMX_MOVQ64rm:
6649 return false;
6650 }
6651
6652 EVT VT = Load1->getValueType(0);
6653 switch (VT.getSimpleVT().SimpleTy) {
6654 default:
6655 // XMM registers. In 64-bit mode we can be a bit more aggressive since we
6656 // have 16 of them to play with.
6657 if (Subtarget.is64Bit()) {
6658 if (NumLoads >= 3)
6659 return false;
6660 } else if (NumLoads) {
6661 return false;
6662 }
6663 break;
6664 case MVT::i8:
6665 case MVT::i16:
6666 case MVT::i32:
6667 case MVT::i64:
6668 case MVT::f32:
6669 case MVT::f64:
6670 if (NumLoads)
6671 return false;
6672 break;
6673 }
6674
6675 return true;
6676 }
6677
6678 bool X86InstrInfo::
6679 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
6680 assert(Cond.size() == 1 && "Invalid X86 branch condition!");
6681 X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
6682 Cond[0].setImm(GetOppositeBranchCondition(CC));
6683 return false;
6684 }
6685
6686 bool X86InstrInfo::
6687 isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
6688 // FIXME: Return false for x87 stack register classes for now. We can't
6689 // allow any loads of these registers before FpGet_ST0_80.
6690 return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
6691 RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
6692 RC == &X86::RFP80RegClass);
6693 }
6694
6695 /// Return a virtual register initialized with the global base register
6696 /// value. Output the instructions required to initialize the register in
6697 /// the function entry block, if necessary.
6698 ///
6699 /// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
6700 ///
6701 unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
6702 assert((!Subtarget.is64Bit() ||
6703 MF->getTarget().getCodeModel() == CodeModel::Medium ||
6704 MF->getTarget().getCodeModel() == CodeModel::Large) &&
6705 "X86-64 PIC uses RIP relative addressing");
6706
6707 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
6708 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
6709 if (GlobalBaseReg != 0)
6710 return GlobalBaseReg;
6711
6712 // Create the register. The code to initialize it is inserted
6713 // later, by the CGBR pass (below).
6714 MachineRegisterInfo &RegInfo = MF->getRegInfo();
6715 GlobalBaseReg = RegInfo.createVirtualRegister(
6716 Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
6717 X86FI->setGlobalBaseReg(GlobalBaseReg);
6718 return GlobalBaseReg;
6719 }
6720
6721 // These are the replaceable SSE instructions. Some of these have Int variants
6722 // that we don't include here. We don't want to replace instructions selected
6723 // by intrinsics.
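// Each row holds the equivalent opcode per execution domain; e.g. XORPSrr
// (PackedSingle), XORPDrr (PackedDouble) and PXORrr (PackedInt) compute the
// same 128-bit result, so the domain-fixing pass may swap them to avoid
// domain-crossing penalties.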
6724 static const uint16_t ReplaceableInstrs[][3] = {
6725 //PackedSingle PackedDouble PackedInt
6726 { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr },
6727 { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm },
6728 { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr },
6729 { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr },
6730 { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm },
6731 { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr },
6732 { X86::MOVSDmr, X86::MOVSDmr, X86::MOVPQI2QImr },
6733 { X86::MOVSSmr, X86::MOVSSmr, X86::MOVPDI2DImr },
6734 { X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm },
6735 { X86::MOVSDrm_alt,X86::MOVSDrm_alt,X86::MOVQI2PQIrm },
6736 { X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm },
6737 { X86::MOVSSrm_alt,X86::MOVSSrm_alt,X86::MOVDI2PDIrm },
6738 { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr },
6739 { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm },
6740 { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr },
6741 { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm },
6742 { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr },
6743 { X86::ORPSrm, X86::ORPDrm, X86::PORrm },
6744 { X86::ORPSrr, X86::ORPDrr, X86::PORrr },
6745 { X86::XORPSrm, X86::XORPDrm, X86::PXORrm },
6746 { X86::XORPSrr, X86::XORPDrr, X86::PXORrr },
6747 { X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm },
6748 { X86::MOVLHPSrr, X86::UNPCKLPDrr, X86::PUNPCKLQDQrr },
6749 { X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm },
6750 { X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr },
6751 { X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm },
6752 { X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr },
6753 { X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm },
6754 { X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr },
6755 { X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr },
6756 { X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr },
6757 // AVX 128-bit support
6758 { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr },
6759 { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm },
6760 { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr },
6761 { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr },
6762 { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm },
6763 { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr },
6764 { X86::VMOVSDmr, X86::VMOVSDmr, X86::VMOVPQI2QImr },
6765 { X86::VMOVSSmr, X86::VMOVSSmr, X86::VMOVPDI2DImr },
6766 { X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm },
6767 { X86::VMOVSDrm_alt,X86::VMOVSDrm_alt,X86::VMOVQI2PQIrm },
6768 { X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm },
6769 { X86::VMOVSSrm_alt,X86::VMOVSSrm_alt,X86::VMOVDI2PDIrm },
6770 { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
6771 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm },
6772 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr },
6773 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm },
6774 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr },
6775 { X86::VORPSrm, X86::VORPDrm, X86::VPORrm },
6776 { X86::VORPSrr, X86::VORPDrr, X86::VPORrr },
6777 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm },
6778 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr },
6779 { X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm },
6780 { X86::VMOVLHPSrr, X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr },
6781 { X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm },
6782 { X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr },
6783 { X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm },
6784 { X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr },
6785 { X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm },
6786 { X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr },
6787 { X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr },
6788 { X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr },
6789 // AVX 256-bit support
6790 { X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr },
6791 { X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm },
6792 { X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr },
6793 { X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr },
6794 { X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm },
6795 { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr },
6796 { X86::VPERMPSYrm, X86::VPERMPSYrm, X86::VPERMDYrm },
6797 { X86::VPERMPSYrr, X86::VPERMPSYrr, X86::VPERMDYrr },
6798 { X86::VPERMPDYmi, X86::VPERMPDYmi, X86::VPERMQYmi },
6799 { X86::VPERMPDYri, X86::VPERMPDYri, X86::VPERMQYri },
6800 // AVX512 support
6801 { X86::VMOVLPSZ128mr, X86::VMOVLPDZ128mr, X86::VMOVPQI2QIZmr },
6802 { X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr },
6803 { X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr },
6804 { X86::VMOVNTPSZmr, X86::VMOVNTPDZmr, X86::VMOVNTDQZmr },
6805 { X86::VMOVSDZmr, X86::VMOVSDZmr, X86::VMOVPQI2QIZmr },
6806 { X86::VMOVSSZmr, X86::VMOVSSZmr, X86::VMOVPDI2DIZmr },
6807 { X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm },
6808 { X86::VMOVSDZrm_alt, X86::VMOVSDZrm_alt, X86::VMOVQI2PQIZrm },
6809 { X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm },
6810 { X86::VMOVSSZrm_alt, X86::VMOVSSZrm_alt, X86::VMOVDI2PDIZrm },
6811 { X86::VBROADCASTSSZ128rr,X86::VBROADCASTSSZ128rr,X86::VPBROADCASTDZ128rr },
6812 { X86::VBROADCASTSSZ128rm,X86::VBROADCASTSSZ128rm,X86::VPBROADCASTDZ128rm },
6813 { X86::VBROADCASTSSZ256rr,X86::VBROADCASTSSZ256rr,X86::VPBROADCASTDZ256rr },
6814 { X86::VBROADCASTSSZ256rm,X86::VBROADCASTSSZ256rm,X86::VPBROADCASTDZ256rm },
6815 { X86::VBROADCASTSSZrr, X86::VBROADCASTSSZrr, X86::VPBROADCASTDZrr },
6816 { X86::VBROADCASTSSZrm, X86::VBROADCASTSSZrm, X86::VPBROADCASTDZrm },
6817 { X86::VMOVDDUPZ128rr, X86::VMOVDDUPZ128rr, X86::VPBROADCASTQZ128rr },
6818 { X86::VMOVDDUPZ128rm, X86::VMOVDDUPZ128rm, X86::VPBROADCASTQZ128rm },
6819 { X86::VBROADCASTSDZ256rr,X86::VBROADCASTSDZ256rr,X86::VPBROADCASTQZ256rr },
6820 { X86::VBROADCASTSDZ256rm,X86::VBROADCASTSDZ256rm,X86::VPBROADCASTQZ256rm },
6821 { X86::VBROADCASTSDZrr, X86::VBROADCASTSDZrr, X86::VPBROADCASTQZrr },
6822 { X86::VBROADCASTSDZrm, X86::VBROADCASTSDZrm, X86::VPBROADCASTQZrm },
6823 { X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrr, X86::VINSERTI32x4Zrr },
6824 { X86::VINSERTF32x4Zrm, X86::VINSERTF32x4Zrm, X86::VINSERTI32x4Zrm },
6825 { X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrr, X86::VINSERTI32x8Zrr },
6826 { X86::VINSERTF32x8Zrm, X86::VINSERTF32x8Zrm, X86::VINSERTI32x8Zrm },
6827 { X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrr, X86::VINSERTI64x2Zrr },
6828 { X86::VINSERTF64x2Zrm, X86::VINSERTF64x2Zrm, X86::VINSERTI64x2Zrm },
6829 { X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrr, X86::VINSERTI64x4Zrr },
6830 { X86::VINSERTF64x4Zrm, X86::VINSERTF64x4Zrm, X86::VINSERTI64x4Zrm },
6831 { X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr },
6832 { X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm },
6833 { X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr },
6834 { X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm },
6835 { X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zrr, X86::VEXTRACTI32x4Zrr },
6836 { X86::VEXTRACTF32x4Zmr, X86::VEXTRACTF32x4Zmr, X86::VEXTRACTI32x4Zmr },
6837 { X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zrr, X86::VEXTRACTI32x8Zrr },
6838 { X86::VEXTRACTF32x8Zmr, X86::VEXTRACTF32x8Zmr, X86::VEXTRACTI32x8Zmr },
6839 { X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zrr, X86::VEXTRACTI64x2Zrr },
6840 { X86::VEXTRACTF64x2Zmr, X86::VEXTRACTF64x2Zmr, X86::VEXTRACTI64x2Zmr },
6841 { X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zrr, X86::VEXTRACTI64x4Zrr },
6842 { X86::VEXTRACTF64x4Zmr, X86::VEXTRACTF64x4Zmr, X86::VEXTRACTI64x4Zmr },
6843 { X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr },
6844 { X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr },
6845 { X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr },
6846 { X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr },
6847 { X86::VPERMILPSmi, X86::VPERMILPSmi, X86::VPSHUFDmi },
6848 { X86::VPERMILPSri, X86::VPERMILPSri, X86::VPSHUFDri },
6849 { X86::VPERMILPSZ128mi, X86::VPERMILPSZ128mi, X86::VPSHUFDZ128mi },
6850 { X86::VPERMILPSZ128ri, X86::VPERMILPSZ128ri, X86::VPSHUFDZ128ri },
6851 { X86::VPERMILPSZ256mi, X86::VPERMILPSZ256mi, X86::VPSHUFDZ256mi },
6852 { X86::VPERMILPSZ256ri, X86::VPERMILPSZ256ri, X86::VPSHUFDZ256ri },
6853 { X86::VPERMILPSZmi, X86::VPERMILPSZmi, X86::VPSHUFDZmi },
6854 { X86::VPERMILPSZri, X86::VPERMILPSZri, X86::VPSHUFDZri },
6855 { X86::VPERMPSZ256rm, X86::VPERMPSZ256rm, X86::VPERMDZ256rm },
6856 { X86::VPERMPSZ256rr, X86::VPERMPSZ256rr, X86::VPERMDZ256rr },
6857 { X86::VPERMPDZ256mi, X86::VPERMPDZ256mi, X86::VPERMQZ256mi },
6858 { X86::VPERMPDZ256ri, X86::VPERMPDZ256ri, X86::VPERMQZ256ri },
6859 { X86::VPERMPDZ256rm, X86::VPERMPDZ256rm, X86::VPERMQZ256rm },
6860 { X86::VPERMPDZ256rr, X86::VPERMPDZ256rr, X86::VPERMQZ256rr },
6861 { X86::VPERMPSZrm, X86::VPERMPSZrm, X86::VPERMDZrm },
6862 { X86::VPERMPSZrr, X86::VPERMPSZrr, X86::VPERMDZrr },
6863 { X86::VPERMPDZmi, X86::VPERMPDZmi, X86::VPERMQZmi },
6864 { X86::VPERMPDZri, X86::VPERMPDZri, X86::VPERMQZri },
6865 { X86::VPERMPDZrm, X86::VPERMPDZrm, X86::VPERMQZrm },
6866 { X86::VPERMPDZrr, X86::VPERMPDZrr, X86::VPERMQZrr },
6867 { X86::VUNPCKLPDZ256rm, X86::VUNPCKLPDZ256rm, X86::VPUNPCKLQDQZ256rm },
6868 { X86::VUNPCKLPDZ256rr, X86::VUNPCKLPDZ256rr, X86::VPUNPCKLQDQZ256rr },
6869 { X86::VUNPCKHPDZ256rm, X86::VUNPCKHPDZ256rm, X86::VPUNPCKHQDQZ256rm },
6870 { X86::VUNPCKHPDZ256rr, X86::VUNPCKHPDZ256rr, X86::VPUNPCKHQDQZ256rr },
6871 { X86::VUNPCKLPSZ256rm, X86::VUNPCKLPSZ256rm, X86::VPUNPCKLDQZ256rm },
6872 { X86::VUNPCKLPSZ256rr, X86::VUNPCKLPSZ256rr, X86::VPUNPCKLDQZ256rr },
6873 { X86::VUNPCKHPSZ256rm, X86::VUNPCKHPSZ256rm, X86::VPUNPCKHDQZ256rm },
6874 { X86::VUNPCKHPSZ256rr, X86::VUNPCKHPSZ256rr, X86::VPUNPCKHDQZ256rr },
6875 { X86::VUNPCKLPDZ128rm, X86::VUNPCKLPDZ128rm, X86::VPUNPCKLQDQZ128rm },
6876 { X86::VMOVLHPSZrr, X86::VUNPCKLPDZ128rr, X86::VPUNPCKLQDQZ128rr },
6877 { X86::VUNPCKHPDZ128rm, X86::VUNPCKHPDZ128rm, X86::VPUNPCKHQDQZ128rm },
6878 { X86::VUNPCKHPDZ128rr, X86::VUNPCKHPDZ128rr, X86::VPUNPCKHQDQZ128rr },
6879 { X86::VUNPCKLPSZ128rm, X86::VUNPCKLPSZ128rm, X86::VPUNPCKLDQZ128rm },
6880 { X86::VUNPCKLPSZ128rr, X86::VUNPCKLPSZ128rr, X86::VPUNPCKLDQZ128rr },
6881 { X86::VUNPCKHPSZ128rm, X86::VUNPCKHPSZ128rm, X86::VPUNPCKHDQZ128rm },
6882 { X86::VUNPCKHPSZ128rr, X86::VUNPCKHPSZ128rr, X86::VPUNPCKHDQZ128rr },
6883 { X86::VUNPCKLPDZrm, X86::VUNPCKLPDZrm, X86::VPUNPCKLQDQZrm },
6884 { X86::VUNPCKLPDZrr, X86::VUNPCKLPDZrr, X86::VPUNPCKLQDQZrr },
6885 { X86::VUNPCKHPDZrm, X86::VUNPCKHPDZrm, X86::VPUNPCKHQDQZrm },
6886 { X86::VUNPCKHPDZrr, X86::VUNPCKHPDZrr, X86::VPUNPCKHQDQZrr },
6887 { X86::VUNPCKLPSZrm, X86::VUNPCKLPSZrm, X86::VPUNPCKLDQZrm },
6888 { X86::VUNPCKLPSZrr, X86::VUNPCKLPSZrr, X86::VPUNPCKLDQZrr },
6889 { X86::VUNPCKHPSZrm, X86::VUNPCKHPSZrm, X86::VPUNPCKHDQZrm },
6890 { X86::VUNPCKHPSZrr, X86::VUNPCKHPSZrr, X86::VPUNPCKHDQZrr },
6891 { X86::VEXTRACTPSZmr, X86::VEXTRACTPSZmr, X86::VPEXTRDZmr },
6892 { X86::VEXTRACTPSZrr, X86::VEXTRACTPSZrr, X86::VPEXTRDZrr },
6893 };
6894
6895 static const uint16_t ReplaceableInstrsAVX2[][3] = {
6896 //PackedSingle PackedDouble PackedInt
6897 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm },
6898 { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr },
6899 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm },
6900 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr },
6901 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm },
6902 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr },
6903 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm },
6904 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr },
6905 { X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm },
6906 { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr },
6907 { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm},
6908 { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr},
6909 { X86::VMOVDDUPrm, X86::VMOVDDUPrm, X86::VPBROADCASTQrm},
6910 { X86::VMOVDDUPrr, X86::VMOVDDUPrr, X86::VPBROADCASTQrr},
6911 { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr},
6912 { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm},
6913 { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr},
6914 { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm},
6915 { X86::VBROADCASTF128, X86::VBROADCASTF128, X86::VBROADCASTI128 },
6916 { X86::VBLENDPSYrri, X86::VBLENDPSYrri, X86::VPBLENDDYrri },
6917 { X86::VBLENDPSYrmi, X86::VBLENDPSYrmi, X86::VPBLENDDYrmi },
6918 { X86::VPERMILPSYmi, X86::VPERMILPSYmi, X86::VPSHUFDYmi },
6919 { X86::VPERMILPSYri, X86::VPERMILPSYri, X86::VPSHUFDYri },
6920 { X86::VUNPCKLPDYrm, X86::VUNPCKLPDYrm, X86::VPUNPCKLQDQYrm },
6921 { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrr, X86::VPUNPCKLQDQYrr },
6922 { X86::VUNPCKHPDYrm, X86::VUNPCKHPDYrm, X86::VPUNPCKHQDQYrm },
6923 { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrr, X86::VPUNPCKHQDQYrr },
6924 { X86::VUNPCKLPSYrm, X86::VUNPCKLPSYrm, X86::VPUNPCKLDQYrm },
6925 { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrr, X86::VPUNPCKLDQYrr },
6926 { X86::VUNPCKHPSYrm, X86::VUNPCKHPSYrm, X86::VPUNPCKHDQYrm },
6927 { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrr, X86::VPUNPCKHDQYrr },
6928 };
6929
6930 static const uint16_t ReplaceableInstrsFP[][3] = {
6931 //PackedSingle PackedDouble (no PackedInt equivalent)
6932 { X86::MOVLPSrm, X86::MOVLPDrm, X86::INSTRUCTION_LIST_END },
6933 { X86::MOVHPSrm, X86::MOVHPDrm, X86::INSTRUCTION_LIST_END },
6934 { X86::MOVHPSmr, X86::MOVHPDmr, X86::INSTRUCTION_LIST_END },
6935 { X86::VMOVLPSrm, X86::VMOVLPDrm, X86::INSTRUCTION_LIST_END },
6936 { X86::VMOVHPSrm, X86::VMOVHPDrm, X86::INSTRUCTION_LIST_END },
6937 { X86::VMOVHPSmr, X86::VMOVHPDmr, X86::INSTRUCTION_LIST_END },
6938 { X86::VMOVLPSZ128rm, X86::VMOVLPDZ128rm, X86::INSTRUCTION_LIST_END },
6939 { X86::VMOVHPSZ128rm, X86::VMOVHPDZ128rm, X86::INSTRUCTION_LIST_END },
6940 { X86::VMOVHPSZ128mr, X86::VMOVHPDZ128mr, X86::INSTRUCTION_LIST_END },
6941 };
6942
6943 static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = {
6944 //PackedSingle PackedDouble PackedInt
6945 { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
6946 { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
6947 { X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm },
6948 { X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr },
6949 };
6950
6951 static const uint16_t ReplaceableInstrsAVX512[][4] = {
6952 // Two integer columns for 64-bit and 32-bit elements.
6953 //PackedSingle PackedDouble PackedInt PackedInt
6954 { X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr },
6955 { X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm },
6956 { X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr },
6957 { X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr },
6958 { X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm },
6959 { X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr },
6960 { X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm },
6961 { X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr },
6962 { X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr },
6963 { X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm },
6964 { X86::VMOVAPSZmr, X86::VMOVAPDZmr, X86::VMOVDQA64Zmr, X86::VMOVDQA32Zmr },
6965 { X86::VMOVAPSZrm, X86::VMOVAPDZrm, X86::VMOVDQA64Zrm, X86::VMOVDQA32Zrm },
6966 { X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr },
6967 { X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr },
6968 { X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm },
6969 };
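// The two integer columns let lookupAVX512() below match an opcode against
// either the 64-bit or the 32-bit element variant when querying the integer
// domain.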
6970
6971 static const uint16_t ReplaceableInstrsAVX512DQ[][4] = {
6972 // Two integer columns for 64-bit and 32-bit elements.
6973 //PackedSingle PackedDouble PackedInt PackedInt
6974 { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
6975 { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
6976 { X86::VANDPSZ128rm, X86::VANDPDZ128rm, X86::VPANDQZ128rm, X86::VPANDDZ128rm },
6977 { X86::VANDPSZ128rr, X86::VANDPDZ128rr, X86::VPANDQZ128rr, X86::VPANDDZ128rr },
6978 { X86::VORPSZ128rm, X86::VORPDZ128rm, X86::VPORQZ128rm, X86::VPORDZ128rm },
6979 { X86::VORPSZ128rr, X86::VORPDZ128rr, X86::VPORQZ128rr, X86::VPORDZ128rr },
6980 { X86::VXORPSZ128rm, X86::VXORPDZ128rm, X86::VPXORQZ128rm, X86::VPXORDZ128rm },
6981 { X86::VXORPSZ128rr, X86::VXORPDZ128rr, X86::VPXORQZ128rr, X86::VPXORDZ128rr },
6982 { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
6983 { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
6984 { X86::VANDPSZ256rm, X86::VANDPDZ256rm, X86::VPANDQZ256rm, X86::VPANDDZ256rm },
6985 { X86::VANDPSZ256rr, X86::VANDPDZ256rr, X86::VPANDQZ256rr, X86::VPANDDZ256rr },
6986 { X86::VORPSZ256rm, X86::VORPDZ256rm, X86::VPORQZ256rm, X86::VPORDZ256rm },
6987 { X86::VORPSZ256rr, X86::VORPDZ256rr, X86::VPORQZ256rr, X86::VPORDZ256rr },
6988 { X86::VXORPSZ256rm, X86::VXORPDZ256rm, X86::VPXORQZ256rm, X86::VPXORDZ256rm },
6989 { X86::VXORPSZ256rr, X86::VXORPDZ256rr, X86::VPXORQZ256rr, X86::VPXORDZ256rr },
6990 { X86::VANDNPSZrm, X86::VANDNPDZrm, X86::VPANDNQZrm, X86::VPANDNDZrm },
6991 { X86::VANDNPSZrr, X86::VANDNPDZrr, X86::VPANDNQZrr, X86::VPANDNDZrr },
6992 { X86::VANDPSZrm, X86::VANDPDZrm, X86::VPANDQZrm, X86::VPANDDZrm },
6993 { X86::VANDPSZrr, X86::VANDPDZrr, X86::VPANDQZrr, X86::VPANDDZrr },
6994 { X86::VORPSZrm, X86::VORPDZrm, X86::VPORQZrm, X86::VPORDZrm },
6995 { X86::VORPSZrr, X86::VORPDZrr, X86::VPORQZrr, X86::VPORDZrr },
6996 { X86::VXORPSZrm, X86::VXORPDZrm, X86::VPXORQZrm, X86::VPXORDZrm },
6997 { X86::VXORPSZrr, X86::VXORPDZrr, X86::VPXORQZrr, X86::VPXORDZrr },
6998 };
6999
7000 static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = {
7001 // Two integer columns for 64-bit and 32-bit elements.
7002 //PackedSingle PackedDouble
7003 //PackedInt PackedInt
7004 { X86::VANDNPSZ128rmk, X86::VANDNPDZ128rmk,
7005 X86::VPANDNQZ128rmk, X86::VPANDNDZ128rmk },
7006 { X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz,
7007 X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz },
7008 { X86::VANDNPSZ128rrk, X86::VANDNPDZ128rrk,
7009 X86::VPANDNQZ128rrk, X86::VPANDNDZ128rrk },
7010 { X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz,
7011 X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz },
7012 { X86::VANDPSZ128rmk, X86::VANDPDZ128rmk,
7013 X86::VPANDQZ128rmk, X86::VPANDDZ128rmk },
7014 { X86::VANDPSZ128rmkz, X86::VANDPDZ128rmkz,
7015 X86::VPANDQZ128rmkz, X86::VPANDDZ128rmkz },
7016 { X86::VANDPSZ128rrk, X86::VANDPDZ128rrk,
7017 X86::VPANDQZ128rrk, X86::VPANDDZ128rrk },
7018 { X86::VANDPSZ128rrkz, X86::VANDPDZ128rrkz,
7019 X86::VPANDQZ128rrkz, X86::VPANDDZ128rrkz },
7020 { X86::VORPSZ128rmk, X86::VORPDZ128rmk,
7021 X86::VPORQZ128rmk, X86::VPORDZ128rmk },
7022 { X86::VORPSZ128rmkz, X86::VORPDZ128rmkz,
7023 X86::VPORQZ128rmkz, X86::VPORDZ128rmkz },
7024 { X86::VORPSZ128rrk, X86::VORPDZ128rrk,
7025 X86::VPORQZ128rrk, X86::VPORDZ128rrk },
7026 { X86::VORPSZ128rrkz, X86::VORPDZ128rrkz,
7027 X86::VPORQZ128rrkz, X86::VPORDZ128rrkz },
7028 { X86::VXORPSZ128rmk, X86::VXORPDZ128rmk,
7029 X86::VPXORQZ128rmk, X86::VPXORDZ128rmk },
7030 { X86::VXORPSZ128rmkz, X86::VXORPDZ128rmkz,
7031 X86::VPXORQZ128rmkz, X86::VPXORDZ128rmkz },
7032 { X86::VXORPSZ128rrk, X86::VXORPDZ128rrk,
7033 X86::VPXORQZ128rrk, X86::VPXORDZ128rrk },
7034 { X86::VXORPSZ128rrkz, X86::VXORPDZ128rrkz,
7035 X86::VPXORQZ128rrkz, X86::VPXORDZ128rrkz },
7036 { X86::VANDNPSZ256rmk, X86::VANDNPDZ256rmk,
7037 X86::VPANDNQZ256rmk, X86::VPANDNDZ256rmk },
7038 { X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz,
7039 X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz },
7040 { X86::VANDNPSZ256rrk, X86::VANDNPDZ256rrk,
7041 X86::VPANDNQZ256rrk, X86::VPANDNDZ256rrk },
7042 { X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz,
7043 X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz },
7044 { X86::VANDPSZ256rmk, X86::VANDPDZ256rmk,
7045 X86::VPANDQZ256rmk, X86::VPANDDZ256rmk },
7046 { X86::VANDPSZ256rmkz, X86::VANDPDZ256rmkz,
7047 X86::VPANDQZ256rmkz, X86::VPANDDZ256rmkz },
7048 { X86::VANDPSZ256rrk, X86::VANDPDZ256rrk,
7049 X86::VPANDQZ256rrk, X86::VPANDDZ256rrk },
7050 { X86::VANDPSZ256rrkz, X86::VANDPDZ256rrkz,
7051 X86::VPANDQZ256rrkz, X86::VPANDDZ256rrkz },
7052 { X86::VORPSZ256rmk, X86::VORPDZ256rmk,
7053 X86::VPORQZ256rmk, X86::VPORDZ256rmk },
7054 { X86::VORPSZ256rmkz, X86::VORPDZ256rmkz,
7055 X86::VPORQZ256rmkz, X86::VPORDZ256rmkz },
7056 { X86::VORPSZ256rrk, X86::VORPDZ256rrk,
7057 X86::VPORQZ256rrk, X86::VPORDZ256rrk },
7058 { X86::VORPSZ256rrkz, X86::VORPDZ256rrkz,
7059 X86::VPORQZ256rrkz, X86::VPORDZ256rrkz },
7060 { X86::VXORPSZ256rmk, X86::VXORPDZ256rmk,
7061 X86::VPXORQZ256rmk, X86::VPXORDZ256rmk },
7062 { X86::VXORPSZ256rmkz, X86::VXORPDZ256rmkz,
7063 X86::VPXORQZ256rmkz, X86::VPXORDZ256rmkz },
7064 { X86::VXORPSZ256rrk, X86::VXORPDZ256rrk,
7065 X86::VPXORQZ256rrk, X86::VPXORDZ256rrk },
7066 { X86::VXORPSZ256rrkz, X86::VXORPDZ256rrkz,
7067 X86::VPXORQZ256rrkz, X86::VPXORDZ256rrkz },
7068 { X86::VANDNPSZrmk, X86::VANDNPDZrmk,
7069 X86::VPANDNQZrmk, X86::VPANDNDZrmk },
7070 { X86::VANDNPSZrmkz, X86::VANDNPDZrmkz,
7071 X86::VPANDNQZrmkz, X86::VPANDNDZrmkz },
7072 { X86::VANDNPSZrrk, X86::VANDNPDZrrk,
7073 X86::VPANDNQZrrk, X86::VPANDNDZrrk },
7074 { X86::VANDNPSZrrkz, X86::VANDNPDZrrkz,
7075 X86::VPANDNQZrrkz, X86::VPANDNDZrrkz },
7076 { X86::VANDPSZrmk, X86::VANDPDZrmk,
7077 X86::VPANDQZrmk, X86::VPANDDZrmk },
7078 { X86::VANDPSZrmkz, X86::VANDPDZrmkz,
7079 X86::VPANDQZrmkz, X86::VPANDDZrmkz },
7080 { X86::VANDPSZrrk, X86::VANDPDZrrk,
7081 X86::VPANDQZrrk, X86::VPANDDZrrk },
7082 { X86::VANDPSZrrkz, X86::VANDPDZrrkz,
7083 X86::VPANDQZrrkz, X86::VPANDDZrrkz },
7084 { X86::VORPSZrmk, X86::VORPDZrmk,
7085 X86::VPORQZrmk, X86::VPORDZrmk },
7086 { X86::VORPSZrmkz, X86::VORPDZrmkz,
7087 X86::VPORQZrmkz, X86::VPORDZrmkz },
7088 { X86::VORPSZrrk, X86::VORPDZrrk,
7089 X86::VPORQZrrk, X86::VPORDZrrk },
7090 { X86::VORPSZrrkz, X86::VORPDZrrkz,
7091 X86::VPORQZrrkz, X86::VPORDZrrkz },
7092 { X86::VXORPSZrmk, X86::VXORPDZrmk,
7093 X86::VPXORQZrmk, X86::VPXORDZrmk },
7094 { X86::VXORPSZrmkz, X86::VXORPDZrmkz,
7095 X86::VPXORQZrmkz, X86::VPXORDZrmkz },
7096 { X86::VXORPSZrrk, X86::VXORPDZrrk,
7097 X86::VPXORQZrrk, X86::VPXORDZrrk },
7098 { X86::VXORPSZrrkz, X86::VXORPDZrrkz,
7099 X86::VPXORQZrrkz, X86::VPXORDZrrkz },
7100 // Broadcast loads can be handled the same as masked operations to avoid
7101 // changing element size.
7102 { X86::VANDNPSZ128rmb, X86::VANDNPDZ128rmb,
7103 X86::VPANDNQZ128rmb, X86::VPANDNDZ128rmb },
7104 { X86::VANDPSZ128rmb, X86::VANDPDZ128rmb,
7105 X86::VPANDQZ128rmb, X86::VPANDDZ128rmb },
7106 { X86::VORPSZ128rmb, X86::VORPDZ128rmb,
7107 X86::VPORQZ128rmb, X86::VPORDZ128rmb },
7108 { X86::VXORPSZ128rmb, X86::VXORPDZ128rmb,
7109 X86::VPXORQZ128rmb, X86::VPXORDZ128rmb },
7110 { X86::VANDNPSZ256rmb, X86::VANDNPDZ256rmb,
7111 X86::VPANDNQZ256rmb, X86::VPANDNDZ256rmb },
7112 { X86::VANDPSZ256rmb, X86::VANDPDZ256rmb,
7113 X86::VPANDQZ256rmb, X86::VPANDDZ256rmb },
7114 { X86::VORPSZ256rmb, X86::VORPDZ256rmb,
7115 X86::VPORQZ256rmb, X86::VPORDZ256rmb },
7116 { X86::VXORPSZ256rmb, X86::VXORPDZ256rmb,
7117 X86::VPXORQZ256rmb, X86::VPXORDZ256rmb },
7118 { X86::VANDNPSZrmb, X86::VANDNPDZrmb,
7119 X86::VPANDNQZrmb, X86::VPANDNDZrmb },
7120 { X86::VANDPSZrmb, X86::VANDPDZrmb,
7121 X86::VPANDQZrmb, X86::VPANDDZrmb },
7124 { X86::VORPSZrmb, X86::VORPDZrmb,
7125 X86::VPORQZrmb, X86::VPORDZrmb },
7126 { X86::VXORPSZrmb, X86::VXORPDZrmb,
7127 X86::VPXORQZrmb, X86::VPXORDZrmb },
7128 { X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk,
7129 X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk },
7130 { X86::VANDPSZ128rmbk, X86::VANDPDZ128rmbk,
7131 X86::VPANDQZ128rmbk, X86::VPANDDZ128rmbk },
7132 { X86::VORPSZ128rmbk, X86::VORPDZ128rmbk,
7133 X86::VPORQZ128rmbk, X86::VPORDZ128rmbk },
7134 { X86::VXORPSZ128rmbk, X86::VXORPDZ128rmbk,
7135 X86::VPXORQZ128rmbk, X86::VPXORDZ128rmbk },
7136 { X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk,
7137 X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk },
7138 { X86::VANDPSZ256rmbk, X86::VANDPDZ256rmbk,
7139 X86::VPANDQZ256rmbk, X86::VPANDDZ256rmbk },
7140 { X86::VORPSZ256rmbk, X86::VORPDZ256rmbk,
7141 X86::VPORQZ256rmbk, X86::VPORDZ256rmbk },
7142 { X86::VXORPSZ256rmbk, X86::VXORPDZ256rmbk,
7143 X86::VPXORQZ256rmbk, X86::VPXORDZ256rmbk },
7144 { X86::VANDNPSZrmbk, X86::VANDNPDZrmbk,
7145 X86::VPANDNQZrmbk, X86::VPANDNDZrmbk },
7146 { X86::VANDPSZrmbk, X86::VANDPDZrmbk,
7147 X86::VPANDQZrmbk, X86::VPANDDZrmbk },
7150 { X86::VORPSZrmbk, X86::VORPDZrmbk,
7151 X86::VPORQZrmbk, X86::VPORDZrmbk },
7152 { X86::VXORPSZrmbk, X86::VXORPDZrmbk,
7153 X86::VPXORQZrmbk, X86::VPXORDZrmbk },
7154 { X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz,
7155 X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz},
7156 { X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz,
7157 X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz },
7158 { X86::VORPSZ128rmbkz, X86::VORPDZ128rmbkz,
7159 X86::VPORQZ128rmbkz, X86::VPORDZ128rmbkz },
7160 { X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz,
7161 X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz },
7162 { X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz,
7163 X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz},
7164 { X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz,
7165 X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz },
7166 { X86::VORPSZ256rmbkz, X86::VORPDZ256rmbkz,
7167 X86::VPORQZ256rmbkz, X86::VPORDZ256rmbkz },
7168 { X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz,
7169 X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz },
7170 { X86::VANDNPSZrmbkz, X86::VANDNPDZrmbkz,
7171 X86::VPANDNQZrmbkz, X86::VPANDNDZrmbkz },
7172 { X86::VANDPSZrmbkz, X86::VANDPDZrmbkz,
7173 X86::VPANDQZrmbkz, X86::VPANDDZrmbkz },
7176 { X86::VORPSZrmbkz, X86::VORPDZrmbkz,
7177 X86::VPORQZrmbkz, X86::VPORDZrmbkz },
7178 { X86::VXORPSZrmbkz, X86::VXORPDZrmbkz,
7179 X86::VPXORQZrmbkz, X86::VPXORDZrmbkz },
7180 };
7181
7182 // NOTE: These should only be used by the custom domain methods.
7183 static const uint16_t ReplaceableBlendInstrs[][3] = {
7184 //PackedSingle PackedDouble PackedInt
7185 { X86::BLENDPSrmi, X86::BLENDPDrmi, X86::PBLENDWrmi },
7186 { X86::BLENDPSrri, X86::BLENDPDrri, X86::PBLENDWrri },
7187 { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDWrmi },
7188 { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDWrri },
7189 { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDWYrmi },
7190 { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDWYrri },
7191 };
7192 static const uint16_t ReplaceableBlendAVX2Instrs[][3] = {
7193 //PackedSingle PackedDouble PackedInt
7194 { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDDrmi },
7195 { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDDrri },
7196 { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDDYrmi },
7197 { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDDYrri },
7198 };
7199
7200 // Special table for changing EVEX logic instructions to VEX.
7201 // TODO: Should we run EVEX->VEX earlier?
7202 static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = {
7203 // Two integer columns for 64-bit and 32-bit elements.
7204 //PackedSingle PackedDouble PackedInt PackedInt
7205 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
7206 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
7207 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDQZ128rm, X86::VPANDDZ128rm },
7208 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDQZ128rr, X86::VPANDDZ128rr },
7209 { X86::VORPSrm, X86::VORPDrm, X86::VPORQZ128rm, X86::VPORDZ128rm },
7210 { X86::VORPSrr, X86::VORPDrr, X86::VPORQZ128rr, X86::VPORDZ128rr },
7211 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORQZ128rm, X86::VPXORDZ128rm },
7212 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORQZ128rr, X86::VPXORDZ128rr },
7213 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
7214 { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
7215 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDQZ256rm, X86::VPANDDZ256rm },
7216 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDQZ256rr, X86::VPANDDZ256rr },
7217 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORQZ256rm, X86::VPORDZ256rm },
7218 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORQZ256rr, X86::VPORDZ256rr },
7219 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORQZ256rm, X86::VPXORDZ256rm },
7220 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORQZ256rr, X86::VPXORDZ256rr },
7221 };
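// These rows let a 128/256-bit EVEX logic instruction fall back to the
// shorter VEX encoding when no extended (XMM16-XMM31) registers are used;
// see the encoding-value checks in getExecutionDomainCustom() below.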
7222
7223 // FIXME: Some shuffle and unpack instructions have equivalents in different
7224 // domains, but they require a bit more work than just switching opcodes.
7225
7226 static const uint16_t *lookup(unsigned opcode, unsigned domain,
7227 ArrayRef<uint16_t[3]> Table) {
7228 for (const uint16_t (&Row)[3] : Table)
7229 if (Row[domain-1] == opcode)
7230 return Row;
7231 return nullptr;
7232 }
7233
7234 static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
7235 ArrayRef<uint16_t[4]> Table) {
7236 // If this is the integer domain make sure to check both integer columns.
7237 for (const uint16_t (&Row)[4] : Table)
7238 if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode))
7239 return Row;
7240 return nullptr;
7241 }
7242
7243 // Helper to attempt to widen/narrow blend masks.
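// Worked example: narrowing mask 0b00111100 from 8 lanes to 4 succeeds and
// yields 0b0110, because every adjacent bit pair is homogeneous; narrowing
// 0b00101100 fails at the mixed pair in bits 4-5. Widening 0b0110 from 4
// lanes back to 8 reproduces 0b00111100.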
7244 static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
7245 unsigned NewWidth, unsigned *pNewMask = nullptr) {
7246 assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
7247 "Illegal blend mask scale");
7248 unsigned NewMask = 0;
7249
7250 if ((OldWidth % NewWidth) == 0) {
7251 unsigned Scale = OldWidth / NewWidth;
7252 unsigned SubMask = (1u << Scale) - 1;
7253 for (unsigned i = 0; i != NewWidth; ++i) {
7254 unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
7255 if (Sub == SubMask)
7256 NewMask |= (1u << i);
7257 else if (Sub != 0x0)
7258 return false;
7259 }
7260 } else {
7261 unsigned Scale = NewWidth / OldWidth;
7262 unsigned SubMask = (1u << Scale) - 1;
7263 for (unsigned i = 0; i != OldWidth; ++i) {
7264 if (OldMask & (1 << i)) {
7265 NewMask |= (SubMask << (i * Scale));
7266 }
7267 }
7268 }
7269
7270 if (pNewMask)
7271 *pNewMask = NewMask;
7272 return true;
7273 }
7274
getExecutionDomainCustom(const MachineInstr & MI) const7275 uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {
7276 unsigned Opcode = MI.getOpcode();
7277 unsigned NumOperands = MI.getDesc().getNumOperands();
7278
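  // The result is a bitmask of valid domains: bit 1 = PackedSingle,
  // bit 2 = PackedDouble, bit 3 = PackedInt, matching the SSEDomain
  // encoding used by lookup(). A result of 0 means no custom match was
  // found and the generic tables should be consulted instead.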
  auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
    uint16_t validDomains = 0;
    if (MI.getOperand(NumOperands - 1).isImm()) {
      unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
      if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4))
        validDomains |= 0x2; // PackedSingle
      if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2))
        validDomains |= 0x4; // PackedDouble
      if (!Is256 || Subtarget.hasAVX2())
        validDomains |= 0x8; // PackedInt
    }
    return validDomains;
  };

  switch (Opcode) {
  case X86::BLENDPDrmi:
  case X86::BLENDPDrri:
  case X86::VBLENDPDrmi:
  case X86::VBLENDPDrri:
    return GetBlendDomains(2, false);
  case X86::VBLENDPDYrmi:
  case X86::VBLENDPDYrri:
    return GetBlendDomains(4, true);
  case X86::BLENDPSrmi:
  case X86::BLENDPSrri:
  case X86::VBLENDPSrmi:
  case X86::VBLENDPSrri:
  case X86::VPBLENDDrmi:
  case X86::VPBLENDDrri:
    return GetBlendDomains(4, false);
  case X86::VBLENDPSYrmi:
  case X86::VBLENDPSYrri:
  case X86::VPBLENDDYrmi:
  case X86::VPBLENDDYrri:
    return GetBlendDomains(8, true);
  case X86::PBLENDWrmi:
  case X86::PBLENDWrri:
  case X86::VPBLENDWrmi:
  case X86::VPBLENDWrri:
  // Treat VPBLENDWY as a 128-bit vector as it repeats the lo/hi masks.
  case X86::VPBLENDWYrmi:
  case X86::VPBLENDWYrri:
    return GetBlendDomains(8, false);
  case X86::VPANDDZ128rr:  case X86::VPANDDZ128rm:
  case X86::VPANDDZ256rr:  case X86::VPANDDZ256rm:
  case X86::VPANDQZ128rr:  case X86::VPANDQZ128rm:
  case X86::VPANDQZ256rr:  case X86::VPANDQZ256rm:
  case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
  case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
  case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
  case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
  case X86::VPORDZ128rr:   case X86::VPORDZ128rm:
  case X86::VPORDZ256rr:   case X86::VPORDZ256rm:
  case X86::VPORQZ128rr:   case X86::VPORQZ128rm:
  case X86::VPORQZ256rr:   case X86::VPORQZ256rm:
  case X86::VPXORDZ128rr:  case X86::VPXORDZ128rm:
  case X86::VPXORDZ256rr:  case X86::VPXORDZ256rm:
  case X86::VPXORQZ128rr:  case X86::VPXORQZ128rm:
  case X86::VPXORQZ256rr:  case X86::VPXORQZ256rm:
    // If we don't have DQI see if we can still switch from an EVEX integer
    // instruction to a VEX floating point instruction.
    if (Subtarget.hasDQI())
      return 0;

    if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
      return 0;
    if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
      return 0;
    // Register forms will have 3 operands. Memory form will have more.
    if (NumOperands == 3 &&
        RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
      return 0;

    // All domains are valid.
    return 0xe;
  case X86::MOVHLPSrr:
    // We can swap domains when both inputs are the same register.
    // FIXME: This doesn't catch all the cases we would like. If the input
    // register isn't KILLed by the instruction, the two address instruction
    // pass puts a COPY on one input. The other input uses the original
    // register. This prevents the same physical register from being used by
    // both inputs.
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
        MI.getOperand(0).getSubReg() == 0 &&
        MI.getOperand(1).getSubReg() == 0 &&
        MI.getOperand(2).getSubReg() == 0)
      return 0x6;
    return 0;
  case X86::SHUFPDrri:
    return 0x6;
  }
  return 0;
}

bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
                                            unsigned Domain) const {
  assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
  uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  assert(dom && "Not an SSE instruction");

  unsigned Opcode = MI.getOpcode();
  unsigned NumOperands = MI.getDesc().getNumOperands();

  auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
    if (MI.getOperand(NumOperands - 1).isImm()) {
      unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
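      // An 8-bit VPBLENDWY immediate applies to both 128-bit lanes, so
      // mirror it into a 16-bit mask before rescaling.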
      Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
      unsigned NewImm = Imm;

      const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
      if (!table)
        table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);

      if (Domain == 1) { // PackedSingle
        AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
      } else if (Domain == 2) { // PackedDouble
        AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm);
      } else if (Domain == 3) { // PackedInt
        if (Subtarget.hasAVX2()) {
          // If we are already VPBLENDW use that, else use VPBLENDD.
          if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
            table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
            AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
          }
        } else {
          assert(!Is256 && "128-bit vector expected");
          AdjustBlendMask(Imm, ImmWidth, 8, &NewImm);
        }
      }

      assert(table && table[Domain - 1] && "Unknown domain op");
      MI.setDesc(get(table[Domain - 1]));
      MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
    }
    return true;
  };

  switch (Opcode) {
  case X86::BLENDPDrmi:
  case X86::BLENDPDrri:
  case X86::VBLENDPDrmi:
  case X86::VBLENDPDrri:
    return SetBlendDomain(2, false);
  case X86::VBLENDPDYrmi:
  case X86::VBLENDPDYrri:
    return SetBlendDomain(4, true);
  case X86::BLENDPSrmi:
  case X86::BLENDPSrri:
  case X86::VBLENDPSrmi:
  case X86::VBLENDPSrri:
  case X86::VPBLENDDrmi:
  case X86::VPBLENDDrri:
    return SetBlendDomain(4, false);
  case X86::VBLENDPSYrmi:
  case X86::VBLENDPSYrri:
  case X86::VPBLENDDYrmi:
  case X86::VPBLENDDYrri:
    return SetBlendDomain(8, true);
  case X86::PBLENDWrmi:
  case X86::PBLENDWrri:
  case X86::VPBLENDWrmi:
  case X86::VPBLENDWrri:
    return SetBlendDomain(8, false);
  case X86::VPBLENDWYrmi:
  case X86::VPBLENDWYrri:
    return SetBlendDomain(16, true);
  case X86::VPANDDZ128rr:  case X86::VPANDDZ128rm:
  case X86::VPANDDZ256rr:  case X86::VPANDDZ256rm:
  case X86::VPANDQZ128rr:  case X86::VPANDQZ128rm:
  case X86::VPANDQZ256rr:  case X86::VPANDQZ256rm:
  case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
  case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
  case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
  case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
  case X86::VPORDZ128rr:   case X86::VPORDZ128rm:
  case X86::VPORDZ256rr:   case X86::VPORDZ256rm:
  case X86::VPORQZ128rr:   case X86::VPORQZ128rm:
  case X86::VPORQZ256rr:   case X86::VPORQZ256rm:
  case X86::VPXORDZ128rr:  case X86::VPXORDZ128rm:
  case X86::VPXORDZ256rr:  case X86::VPXORDZ256rm:
  case X86::VPXORQZ128rr:  case X86::VPXORQZ128rm:
  case X86::VPXORQZ256rr:  case X86::VPXORQZ256rm: {
    // Without DQI, convert EVEX instructions to VEX instructions.
    if (Subtarget.hasDQI())
      return false;

    const uint16_t *table = lookupAVX512(MI.getOpcode(), dom,
                                         ReplaceableCustomAVX512LogicInstrs);
    assert(table && "Instruction not found in table?");
    // Don't change integer Q instructions to D instructions and
    // use D instructions if we started with a PS instruction.
    if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
      Domain = 4;
    MI.setDesc(get(table[Domain - 1]));
    return true;
  }
  case X86::UNPCKHPDrr:
  case X86::MOVHLPSrr:
    // We just need to commute the instruction which will switch the domains.
    if (Domain != dom && Domain != 3 &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
        MI.getOperand(0).getSubReg() == 0 &&
        MI.getOperand(1).getSubReg() == 0 &&
        MI.getOperand(2).getSubReg() == 0) {
      commuteInstruction(MI, false);
      return true;
    }
    // We must always return true for MOVHLPSrr.
    if (Opcode == X86::MOVHLPSrr)
      return true;
    break;
  case X86::SHUFPDrri: {
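    // A SHUFPD immediate selects one 64-bit element from each source; the
    // equivalent SHUFPS immediate must select the corresponding pair of
    // 32-bit elements. The base mask 0x44 picks the low qword of each source;
    // e.g. imm 0x1 (high qword of src1, low qword of src2) becomes 0x4e.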
    if (Domain == 1) {
      unsigned Imm = MI.getOperand(3).getImm();
      unsigned NewImm = 0x44;
      if (Imm & 1) NewImm |= 0x0a;
      if (Imm & 2) NewImm |= 0xa0;
      MI.getOperand(3).setImm(NewImm);
      MI.setDesc(get(X86::SHUFPSrri));
    }
    return true;
  }
  }
  return false;
}

std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
  uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  unsigned opcode = MI.getOpcode();
  uint16_t validDomains = 0;
  if (domain) {
    // Attempt to match for custom instructions.
    validDomains = getExecutionDomainCustom(MI);
    if (validDomains)
      return std::make_pair(domain, validDomains);

    if (lookup(opcode, domain, ReplaceableInstrs)) {
      validDomains = 0xe;
    } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
      validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
    } else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
      validDomains = 0x6;
    } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
      // Insert/extract instructions should only affect the domain if AVX2
      // is enabled.
      if (!Subtarget.hasAVX2())
        return std::make_pair(0, 0);
      validDomains = 0xe;
    } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
      validDomains = 0xe;
    } else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain,
                                                  ReplaceableInstrsAVX512DQ)) {
      validDomains = 0xe;
    } else if (Subtarget.hasDQI()) {
      if (const uint16_t *table = lookupAVX512(opcode, domain,
                                               ReplaceableInstrsAVX512DQMasked)) {
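        // Masked DQ logic ops can only swap between the float and integer
        // forms with the same element size: PS pairs with the 32-bit integer
        // column (0xa), PD with the 64-bit one (0xc).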
        if (domain == 1 || (domain == 3 && table[3] == opcode))
          validDomains = 0xa;
        else
          validDomains = 0xc;
      }
    }
  }
  return std::make_pair(domain, validDomains);
}

void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
  assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
  uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  assert(dom && "Not an SSE instruction");

  // Attempt to match for custom instructions.
  if (setExecutionDomainCustom(MI, Domain))
    return;

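  // Otherwise try each replacement table in turn, from the SSE/AVX tables to
  // the AVX-512 ones; the first row containing this opcode supplies the
  // replacement opcode for the requested domain.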
  const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
  if (!table) { // try the other table
    assert((Subtarget.hasAVX2() || Domain < 3) &&
           "256-bit vector operations only available in AVX2");
    table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
  }
  if (!table) { // try the FP table
    table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
    assert((!table || Domain < 3) &&
           "Can only select PackedSingle or PackedDouble");
  }
  if (!table) { // try the other table
    assert(Subtarget.hasAVX2() &&
           "256-bit insert/extract only available in AVX2");
    table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
  }
  if (!table) { // try the AVX512 table
    assert(Subtarget.hasAVX512() && "Requires AVX-512");
    table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
    // Don't change integer Q instructions to D instructions.
    if (table && Domain == 3 && table[3] == MI.getOpcode())
      Domain = 4;
  }
  if (!table) { // try the AVX512DQ table
    assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
    table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
    // Don't change integer Q instructions to D instructions and
    // use D instructions if we started with a PS instruction.
    if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
      Domain = 4;
  }
  if (!table) { // try the AVX512DQMasked table
    assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
    table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
    if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
      Domain = 4;
  }
  assert(table && "Cannot change domain");
  MI.setDesc(get(table[Domain - 1]));
}

/// Return the noop instruction to use for a noop.
void X86InstrInfo::getNoop(MCInst &NopInst) const {
  NopInst.setOpcode(X86::NOOP);
}

bool X86InstrInfo::isHighLatencyDef(int opc) const {
  switch (opc) {
  default: return false;
  case X86::DIVPDrm:
  case X86::DIVPDrr:
  case X86::DIVPSrm:
  case X86::DIVPSrr:
  case X86::DIVSDrm:
  case X86::DIVSDrm_Int:
  case X86::DIVSDrr:
  case X86::DIVSDrr_Int:
  case X86::DIVSSrm:
  case X86::DIVSSrm_Int:
  case X86::DIVSSrr:
  case X86::DIVSSrr_Int:
  case X86::SQRTPDm:
  case X86::SQRTPDr:
  case X86::SQRTPSm:
  case X86::SQRTPSr:
  case X86::SQRTSDm:
  case X86::SQRTSDm_Int:
  case X86::SQRTSDr:
  case X86::SQRTSDr_Int:
  case X86::SQRTSSm:
  case X86::SQRTSSm_Int:
  case X86::SQRTSSr:
  case X86::SQRTSSr_Int:
  // AVX instructions with high latency
  case X86::VDIVPDrm:
  case X86::VDIVPDrr:
  case X86::VDIVPDYrm:
  case X86::VDIVPDYrr:
  case X86::VDIVPSrm:
  case X86::VDIVPSrr:
  case X86::VDIVPSYrm:
  case X86::VDIVPSYrr:
  case X86::VDIVSDrm:
  case X86::VDIVSDrm_Int:
  case X86::VDIVSDrr:
  case X86::VDIVSDrr_Int:
  case X86::VDIVSSrm:
  case X86::VDIVSSrm_Int:
  case X86::VDIVSSrr:
  case X86::VDIVSSrr_Int:
  case X86::VSQRTPDm:
  case X86::VSQRTPDr:
  case X86::VSQRTPDYm:
  case X86::VSQRTPDYr:
  case X86::VSQRTPSm:
  case X86::VSQRTPSr:
  case X86::VSQRTPSYm:
  case X86::VSQRTPSYr:
  case X86::VSQRTSDm:
  case X86::VSQRTSDm_Int:
  case X86::VSQRTSDr:
  case X86::VSQRTSDr_Int:
  case X86::VSQRTSSm:
  case X86::VSQRTSSm_Int:
  case X86::VSQRTSSr:
  case X86::VSQRTSSr_Int:
  // AVX512 instructions with high latency
  case X86::VDIVPDZ128rm:
  case X86::VDIVPDZ128rmb:
  case X86::VDIVPDZ128rmbk:
  case X86::VDIVPDZ128rmbkz:
  case X86::VDIVPDZ128rmk:
  case X86::VDIVPDZ128rmkz:
  case X86::VDIVPDZ128rr:
  case X86::VDIVPDZ128rrk:
  case X86::VDIVPDZ128rrkz:
  case X86::VDIVPDZ256rm:
  case X86::VDIVPDZ256rmb:
  case X86::VDIVPDZ256rmbk:
  case X86::VDIVPDZ256rmbkz:
  case X86::VDIVPDZ256rmk:
  case X86::VDIVPDZ256rmkz:
  case X86::VDIVPDZ256rr:
  case X86::VDIVPDZ256rrk:
  case X86::VDIVPDZ256rrkz:
  case X86::VDIVPDZrrb:
  case X86::VDIVPDZrrbk:
  case X86::VDIVPDZrrbkz:
  case X86::VDIVPDZrm:
  case X86::VDIVPDZrmb:
  case X86::VDIVPDZrmbk:
  case X86::VDIVPDZrmbkz:
  case X86::VDIVPDZrmk:
  case X86::VDIVPDZrmkz:
  case X86::VDIVPDZrr:
  case X86::VDIVPDZrrk:
  case X86::VDIVPDZrrkz:
  case X86::VDIVPSZ128rm:
  case X86::VDIVPSZ128rmb:
  case X86::VDIVPSZ128rmbk:
  case X86::VDIVPSZ128rmbkz:
  case X86::VDIVPSZ128rmk:
  case X86::VDIVPSZ128rmkz:
  case X86::VDIVPSZ128rr:
  case X86::VDIVPSZ128rrk:
  case X86::VDIVPSZ128rrkz:
  case X86::VDIVPSZ256rm:
  case X86::VDIVPSZ256rmb:
  case X86::VDIVPSZ256rmbk:
  case X86::VDIVPSZ256rmbkz:
  case X86::VDIVPSZ256rmk:
  case X86::VDIVPSZ256rmkz:
  case X86::VDIVPSZ256rr:
  case X86::VDIVPSZ256rrk:
  case X86::VDIVPSZ256rrkz:
  case X86::VDIVPSZrrb:
  case X86::VDIVPSZrrbk:
  case X86::VDIVPSZrrbkz:
  case X86::VDIVPSZrm:
  case X86::VDIVPSZrmb:
  case X86::VDIVPSZrmbk:
  case X86::VDIVPSZrmbkz:
  case X86::VDIVPSZrmk:
  case X86::VDIVPSZrmkz:
  case X86::VDIVPSZrr:
  case X86::VDIVPSZrrk:
  case X86::VDIVPSZrrkz:
  case X86::VDIVSDZrm:
  case X86::VDIVSDZrr:
  case X86::VDIVSDZrm_Int:
  case X86::VDIVSDZrm_Intk:
  case X86::VDIVSDZrm_Intkz:
  case X86::VDIVSDZrr_Int:
  case X86::VDIVSDZrr_Intk:
  case X86::VDIVSDZrr_Intkz:
  case X86::VDIVSDZrrb_Int:
  case X86::VDIVSDZrrb_Intk:
  case X86::VDIVSDZrrb_Intkz:
  case X86::VDIVSSZrm:
  case X86::VDIVSSZrr:
  case X86::VDIVSSZrm_Int:
  case X86::VDIVSSZrm_Intk:
  case X86::VDIVSSZrm_Intkz:
  case X86::VDIVSSZrr_Int:
  case X86::VDIVSSZrr_Intk:
  case X86::VDIVSSZrr_Intkz:
  case X86::VDIVSSZrrb_Int:
  case X86::VDIVSSZrrb_Intk:
  case X86::VDIVSSZrrb_Intkz:
  case X86::VSQRTPDZ128m:
  case X86::VSQRTPDZ128mb:
  case X86::VSQRTPDZ128mbk:
  case X86::VSQRTPDZ128mbkz:
  case X86::VSQRTPDZ128mk:
  case X86::VSQRTPDZ128mkz:
  case X86::VSQRTPDZ128r:
  case X86::VSQRTPDZ128rk:
  case X86::VSQRTPDZ128rkz:
  case X86::VSQRTPDZ256m:
  case X86::VSQRTPDZ256mb:
  case X86::VSQRTPDZ256mbk:
  case X86::VSQRTPDZ256mbkz:
  case X86::VSQRTPDZ256mk:
  case X86::VSQRTPDZ256mkz:
  case X86::VSQRTPDZ256r:
  case X86::VSQRTPDZ256rk:
  case X86::VSQRTPDZ256rkz:
  case X86::VSQRTPDZm:
  case X86::VSQRTPDZmb:
  case X86::VSQRTPDZmbk:
  case X86::VSQRTPDZmbkz:
  case X86::VSQRTPDZmk:
  case X86::VSQRTPDZmkz:
  case X86::VSQRTPDZr:
  case X86::VSQRTPDZrb:
  case X86::VSQRTPDZrbk:
  case X86::VSQRTPDZrbkz:
  case X86::VSQRTPDZrk:
  case X86::VSQRTPDZrkz:
  case X86::VSQRTPSZ128m:
  case X86::VSQRTPSZ128mb:
  case X86::VSQRTPSZ128mbk:
  case X86::VSQRTPSZ128mbkz:
  case X86::VSQRTPSZ128mk:
  case X86::VSQRTPSZ128mkz:
  case X86::VSQRTPSZ128r:
  case X86::VSQRTPSZ128rk:
  case X86::VSQRTPSZ128rkz:
  case X86::VSQRTPSZ256m:
  case X86::VSQRTPSZ256mb:
  case X86::VSQRTPSZ256mbk:
  case X86::VSQRTPSZ256mbkz:
  case X86::VSQRTPSZ256mk:
  case X86::VSQRTPSZ256mkz:
  case X86::VSQRTPSZ256r:
  case X86::VSQRTPSZ256rk:
  case X86::VSQRTPSZ256rkz:
  case X86::VSQRTPSZm:
  case X86::VSQRTPSZmb:
  case X86::VSQRTPSZmbk:
  case X86::VSQRTPSZmbkz:
  case X86::VSQRTPSZmk:
  case X86::VSQRTPSZmkz:
  case X86::VSQRTPSZr:
  case X86::VSQRTPSZrb:
  case X86::VSQRTPSZrbk:
  case X86::VSQRTPSZrbkz:
  case X86::VSQRTPSZrk:
  case X86::VSQRTPSZrkz:
  case X86::VSQRTSDZm:
  case X86::VSQRTSDZm_Int:
  case X86::VSQRTSDZm_Intk:
  case X86::VSQRTSDZm_Intkz:
  case X86::VSQRTSDZr:
  case X86::VSQRTSDZr_Int:
  case X86::VSQRTSDZr_Intk:
  case X86::VSQRTSDZr_Intkz:
  case X86::VSQRTSDZrb_Int:
  case X86::VSQRTSDZrb_Intk:
  case X86::VSQRTSDZrb_Intkz:
  case X86::VSQRTSSZm:
  case X86::VSQRTSSZm_Int:
  case X86::VSQRTSSZm_Intk:
  case X86::VSQRTSSZm_Intkz:
  case X86::VSQRTSSZr:
  case X86::VSQRTSSZr_Int:
  case X86::VSQRTSSZr_Intk:
  case X86::VSQRTSSZr_Intkz:
  case X86::VSQRTSSZrb_Int:
  case X86::VSQRTSSZrb_Intk:
  case X86::VSQRTSSZrb_Intkz:

  case X86::VGATHERDPDYrm:
  case X86::VGATHERDPDZ128rm:
  case X86::VGATHERDPDZ256rm:
  case X86::VGATHERDPDZrm:
  case X86::VGATHERDPDrm:
  case X86::VGATHERDPSYrm:
  case X86::VGATHERDPSZ128rm:
  case X86::VGATHERDPSZ256rm:
  case X86::VGATHERDPSZrm:
  case X86::VGATHERDPSrm:
  case X86::VGATHERPF0DPDm:
  case X86::VGATHERPF0DPSm:
  case X86::VGATHERPF0QPDm:
  case X86::VGATHERPF0QPSm:
  case X86::VGATHERPF1DPDm:
  case X86::VGATHERPF1DPSm:
  case X86::VGATHERPF1QPDm:
  case X86::VGATHERPF1QPSm:
  case X86::VGATHERQPDYrm:
  case X86::VGATHERQPDZ128rm:
  case X86::VGATHERQPDZ256rm:
  case X86::VGATHERQPDZrm:
  case X86::VGATHERQPDrm:
  case X86::VGATHERQPSYrm:
  case X86::VGATHERQPSZ128rm:
  case X86::VGATHERQPSZ256rm:
  case X86::VGATHERQPSZrm:
  case X86::VGATHERQPSrm:
  case X86::VPGATHERDDYrm:
  case X86::VPGATHERDDZ128rm:
  case X86::VPGATHERDDZ256rm:
  case X86::VPGATHERDDZrm:
  case X86::VPGATHERDDrm:
  case X86::VPGATHERDQYrm:
  case X86::VPGATHERDQZ128rm:
  case X86::VPGATHERDQZ256rm:
  case X86::VPGATHERDQZrm:
  case X86::VPGATHERDQrm:
  case X86::VPGATHERQDYrm:
  case X86::VPGATHERQDZ128rm:
  case X86::VPGATHERQDZ256rm:
  case X86::VPGATHERQDZrm:
  case X86::VPGATHERQDrm:
  case X86::VPGATHERQQYrm:
  case X86::VPGATHERQQZ128rm:
  case X86::VPGATHERQQZ256rm:
  case X86::VPGATHERQQZrm:
  case X86::VPGATHERQQrm:
  case X86::VSCATTERDPDZ128mr:
  case X86::VSCATTERDPDZ256mr:
  case X86::VSCATTERDPDZmr:
  case X86::VSCATTERDPSZ128mr:
  case X86::VSCATTERDPSZ256mr:
  case X86::VSCATTERDPSZmr:
  case X86::VSCATTERPF0DPDm:
  case X86::VSCATTERPF0DPSm:
  case X86::VSCATTERPF0QPDm:
  case X86::VSCATTERPF0QPSm:
  case X86::VSCATTERPF1DPDm:
  case X86::VSCATTERPF1DPSm:
  case X86::VSCATTERPF1QPDm:
  case X86::VSCATTERPF1QPSm:
  case X86::VSCATTERQPDZ128mr:
  case X86::VSCATTERQPDZ256mr:
  case X86::VSCATTERQPDZmr:
  case X86::VSCATTERQPSZ128mr:
  case X86::VSCATTERQPSZ256mr:
  case X86::VSCATTERQPSZmr:
  case X86::VPSCATTERDDZ128mr:
  case X86::VPSCATTERDDZ256mr:
  case X86::VPSCATTERDDZmr:
  case X86::VPSCATTERDQZ128mr:
  case X86::VPSCATTERDQZ256mr:
  case X86::VPSCATTERDQZmr:
  case X86::VPSCATTERQDZ128mr:
  case X86::VPSCATTERQDZ256mr:
  case X86::VPSCATTERQDZmr:
  case X86::VPSCATTERQQZ128mr:
  case X86::VPSCATTERQQZ256mr:
  case X86::VPSCATTERQQZmr:
    return true;
  }
}

bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
                                         const MachineRegisterInfo *MRI,
                                         const MachineInstr &DefMI,
                                         unsigned DefIdx,
                                         const MachineInstr &UseMI,
                                         unsigned UseIdx) const {
  return isHighLatencyDef(DefMI.getOpcode());
}

bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
                                           const MachineBasicBlock *MBB) const {
  assert(Inst.getNumExplicitOperands() == 3 && Inst.getNumExplicitDefs() == 1 &&
         Inst.getNumDefs() <= 2 && "Reassociation needs binary operators");

  // Integer binary math/logic instructions have a third source operand:
  // the EFLAGS register. That operand must be both defined here and never
  // used; i.e., it must be dead. If the EFLAGS operand is live, then we
  // cannot change anything because rearranging the operands could affect other
  // instructions that depend on the exact status flags (zero, sign, etc.)
  // that are set by using these particular operands with this operation.
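  // For example, reassociating ((a + b) + c) to (a + (b + c)) for ADD32rr
  // changes which addition defines EFLAGS last, so any live flags use could
  // observe a different value.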
  const MachineOperand *FlagDef = Inst.findRegisterDefOperand(X86::EFLAGS);
  assert((Inst.getNumDefs() == 1 || FlagDef) && "Implicit def isn't flags?");
  if (FlagDef && !FlagDef->isDead())
    return false;

  return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
}

// TODO: There are many more machine instruction opcodes to match:
//       1. Other data types (integer, vectors)
//       2. Other math / logic operations (xor, or)
//       3. Other forms of the same operation (intrinsics and other variants)
bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
  switch (Inst.getOpcode()) {
  case X86::AND8rr:
  case X86::AND16rr:
  case X86::AND32rr:
  case X86::AND64rr:
  case X86::OR8rr:
  case X86::OR16rr:
  case X86::OR32rr:
  case X86::OR64rr:
  case X86::XOR8rr:
  case X86::XOR16rr:
  case X86::XOR32rr:
  case X86::XOR64rr:
  case X86::IMUL16rr:
  case X86::IMUL32rr:
  case X86::IMUL64rr:
  case X86::PANDrr:
  case X86::PORrr:
  case X86::PXORrr:
  case X86::ANDPDrr:
  case X86::ANDPSrr:
  case X86::ORPDrr:
  case X86::ORPSrr:
  case X86::XORPDrr:
  case X86::XORPSrr:
  case X86::PADDBrr:
  case X86::PADDWrr:
  case X86::PADDDrr:
  case X86::PADDQrr:
  case X86::PMULLWrr:
  case X86::PMULLDrr:
  case X86::PMAXSBrr:
  case X86::PMAXSDrr:
  case X86::PMAXSWrr:
  case X86::PMAXUBrr:
  case X86::PMAXUDrr:
  case X86::PMAXUWrr:
  case X86::PMINSBrr:
  case X86::PMINSDrr:
  case X86::PMINSWrr:
  case X86::PMINUBrr:
  case X86::PMINUDrr:
  case X86::PMINUWrr:
  case X86::VPANDrr:
  case X86::VPANDYrr:
  case X86::VPANDDZ128rr:
  case X86::VPANDDZ256rr:
  case X86::VPANDDZrr:
  case X86::VPANDQZ128rr:
  case X86::VPANDQZ256rr:
  case X86::VPANDQZrr:
  case X86::VPORrr:
  case X86::VPORYrr:
  case X86::VPORDZ128rr:
  case X86::VPORDZ256rr:
  case X86::VPORDZrr:
  case X86::VPORQZ128rr:
  case X86::VPORQZ256rr:
  case X86::VPORQZrr:
  case X86::VPXORrr:
  case X86::VPXORYrr:
  case X86::VPXORDZ128rr:
  case X86::VPXORDZ256rr:
  case X86::VPXORDZrr:
  case X86::VPXORQZ128rr:
  case X86::VPXORQZ256rr:
  case X86::VPXORQZrr:
  case X86::VANDPDrr:
  case X86::VANDPSrr:
  case X86::VANDPDYrr:
  case X86::VANDPSYrr:
  case X86::VANDPDZ128rr:
  case X86::VANDPSZ128rr:
  case X86::VANDPDZ256rr:
  case X86::VANDPSZ256rr:
  case X86::VANDPDZrr:
  case X86::VANDPSZrr:
  case X86::VORPDrr:
  case X86::VORPSrr:
  case X86::VORPDYrr:
  case X86::VORPSYrr:
  case X86::VORPDZ128rr:
  case X86::VORPSZ128rr:
  case X86::VORPDZ256rr:
  case X86::VORPSZ256rr:
  case X86::VORPDZrr:
  case X86::VORPSZrr:
  case X86::VXORPDrr:
  case X86::VXORPSrr:
  case X86::VXORPDYrr:
  case X86::VXORPSYrr:
  case X86::VXORPDZ128rr:
  case X86::VXORPSZ128rr:
  case X86::VXORPDZ256rr:
  case X86::VXORPSZ256rr:
  case X86::VXORPDZrr:
  case X86::VXORPSZrr:
  case X86::KADDBrr:
  case X86::KADDWrr:
  case X86::KADDDrr:
  case X86::KADDQrr:
  case X86::KANDBrr:
  case X86::KANDWrr:
  case X86::KANDDrr:
  case X86::KANDQrr:
  case X86::KORBrr:
  case X86::KORWrr:
  case X86::KORDrr:
  case X86::KORQrr:
  case X86::KXORBrr:
  case X86::KXORWrr:
  case X86::KXORDrr:
  case X86::KXORQrr:
  case X86::VPADDBrr:
  case X86::VPADDWrr:
  case X86::VPADDDrr:
  case X86::VPADDQrr:
  case X86::VPADDBYrr:
  case X86::VPADDWYrr:
  case X86::VPADDDYrr:
  case X86::VPADDQYrr:
  case X86::VPADDBZ128rr:
  case X86::VPADDWZ128rr:
  case X86::VPADDDZ128rr:
  case X86::VPADDQZ128rr:
  case X86::VPADDBZ256rr:
  case X86::VPADDWZ256rr:
  case X86::VPADDDZ256rr:
  case X86::VPADDQZ256rr:
  case X86::VPADDBZrr:
  case X86::VPADDWZrr:
  case X86::VPADDDZrr:
  case X86::VPADDQZrr:
  case X86::VPMULLWrr:
  case X86::VPMULLWYrr:
  case X86::VPMULLWZ128rr:
  case X86::VPMULLWZ256rr:
  case X86::VPMULLWZrr:
  case X86::VPMULLDrr:
  case X86::VPMULLDYrr:
  case X86::VPMULLDZ128rr:
  case X86::VPMULLDZ256rr:
  case X86::VPMULLDZrr:
  case X86::VPMULLQZ128rr:
  case X86::VPMULLQZ256rr:
  case X86::VPMULLQZrr:
  case X86::VPMAXSBrr:
  case X86::VPMAXSBYrr:
  case X86::VPMAXSBZ128rr:
  case X86::VPMAXSBZ256rr:
  case X86::VPMAXSBZrr:
  case X86::VPMAXSDrr:
  case X86::VPMAXSDYrr:
  case X86::VPMAXSDZ128rr:
  case X86::VPMAXSDZ256rr:
  case X86::VPMAXSDZrr:
  case X86::VPMAXSQZ128rr:
  case X86::VPMAXSQZ256rr:
  case X86::VPMAXSQZrr:
  case X86::VPMAXSWrr:
  case X86::VPMAXSWYrr:
  case X86::VPMAXSWZ128rr:
  case X86::VPMAXSWZ256rr:
  case X86::VPMAXSWZrr:
  case X86::VPMAXUBrr:
  case X86::VPMAXUBYrr:
  case X86::VPMAXUBZ128rr:
  case X86::VPMAXUBZ256rr:
  case X86::VPMAXUBZrr:
  case X86::VPMAXUDrr:
  case X86::VPMAXUDYrr:
  case X86::VPMAXUDZ128rr:
  case X86::VPMAXUDZ256rr:
  case X86::VPMAXUDZrr:
  case X86::VPMAXUQZ128rr:
  case X86::VPMAXUQZ256rr:
  case X86::VPMAXUQZrr:
  case X86::VPMAXUWrr:
  case X86::VPMAXUWYrr:
  case X86::VPMAXUWZ128rr:
  case X86::VPMAXUWZ256rr:
  case X86::VPMAXUWZrr:
  case X86::VPMINSBrr:
  case X86::VPMINSBYrr:
  case X86::VPMINSBZ128rr:
  case X86::VPMINSBZ256rr:
  case X86::VPMINSBZrr:
  case X86::VPMINSDrr:
  case X86::VPMINSDYrr:
  case X86::VPMINSDZ128rr:
  case X86::VPMINSDZ256rr:
  case X86::VPMINSDZrr:
  case X86::VPMINSQZ128rr:
  case X86::VPMINSQZ256rr:
  case X86::VPMINSQZrr:
  case X86::VPMINSWrr:
  case X86::VPMINSWYrr:
  case X86::VPMINSWZ128rr:
  case X86::VPMINSWZ256rr:
  case X86::VPMINSWZrr:
  case X86::VPMINUBrr:
  case X86::VPMINUBYrr:
  case X86::VPMINUBZ128rr:
  case X86::VPMINUBZ256rr:
  case X86::VPMINUBZrr:
  case X86::VPMINUDrr:
  case X86::VPMINUDYrr:
  case X86::VPMINUDZ128rr:
  case X86::VPMINUDZ256rr:
  case X86::VPMINUDZrr:
  case X86::VPMINUQZ128rr:
  case X86::VPMINUQZ256rr:
  case X86::VPMINUQZrr:
  case X86::VPMINUWrr:
  case X86::VPMINUWYrr:
  case X86::VPMINUWZ128rr:
  case X86::VPMINUWZ256rr:
  case X86::VPMINUWZrr:
  // Normal min/max instructions are not commutative because of NaN and signed
  // zero semantics, but these are. Thus, there's no need to check for global
  // relaxed math; the instructions themselves have the properties we need.
  case X86::MAXCPDrr:
  case X86::MAXCPSrr:
  case X86::MAXCSDrr:
  case X86::MAXCSSrr:
  case X86::MINCPDrr:
  case X86::MINCPSrr:
  case X86::MINCSDrr:
  case X86::MINCSSrr:
  case X86::VMAXCPDrr:
  case X86::VMAXCPSrr:
  case X86::VMAXCPDYrr:
  case X86::VMAXCPSYrr:
  case X86::VMAXCPDZ128rr:
  case X86::VMAXCPSZ128rr:
  case X86::VMAXCPDZ256rr:
  case X86::VMAXCPSZ256rr:
  case X86::VMAXCPDZrr:
  case X86::VMAXCPSZrr:
  case X86::VMAXCSDrr:
  case X86::VMAXCSSrr:
  case X86::VMAXCSDZrr:
  case X86::VMAXCSSZrr:
  case X86::VMINCPDrr:
  case X86::VMINCPSrr:
  case X86::VMINCPDYrr:
  case X86::VMINCPSYrr:
  case X86::VMINCPDZ128rr:
  case X86::VMINCPSZ128rr:
  case X86::VMINCPDZ256rr:
  case X86::VMINCPSZ256rr:
  case X86::VMINCPDZrr:
  case X86::VMINCPSZrr:
  case X86::VMINCSDrr:
  case X86::VMINCSSrr:
  case X86::VMINCSDZrr:
  case X86::VMINCSSZrr:
    return true;
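  // FP add/mul, by contrast, are only reassociable when the instruction
  // carries both the 'reassoc' and 'nsz' fast-math flags, since
  // reassociation can change rounding and signed-zero results.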
  case X86::ADDPDrr:
  case X86::ADDPSrr:
  case X86::ADDSDrr:
  case X86::ADDSSrr:
  case X86::MULPDrr:
  case X86::MULPSrr:
  case X86::MULSDrr:
  case X86::MULSSrr:
  case X86::VADDPDrr:
  case X86::VADDPSrr:
  case X86::VADDPDYrr:
  case X86::VADDPSYrr:
  case X86::VADDPDZ128rr:
  case X86::VADDPSZ128rr:
  case X86::VADDPDZ256rr:
  case X86::VADDPSZ256rr:
  case X86::VADDPDZrr:
  case X86::VADDPSZrr:
  case X86::VADDSDrr:
  case X86::VADDSSrr:
  case X86::VADDSDZrr:
  case X86::VADDSSZrr:
  case X86::VMULPDrr:
  case X86::VMULPSrr:
  case X86::VMULPDYrr:
  case X86::VMULPSYrr:
  case X86::VMULPDZ128rr:
  case X86::VMULPSZ128rr:
  case X86::VMULPDZ256rr:
  case X86::VMULPSZ256rr:
  case X86::VMULPDZrr:
  case X86::VMULPSZrr:
  case X86::VMULSDrr:
  case X86::VMULSSrr:
  case X86::VMULSDZrr:
  case X86::VMULSSZrr:
    return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
           Inst.getFlag(MachineInstr::MIFlag::FmNsz);
  default:
    return false;
  }
}

/// If \p DescribedReg overlaps with the MOVrr instruction's destination
/// register then, if possible, describe the value in terms of the source
/// register.
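/// For example, if the copy $ecx = MOV32rr $ebx is asked to describe $cl,
/// the corresponding sub-register $bl of the source is returned.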
static Optional<ParamLoadedValue>
describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
                         const TargetRegisterInfo *TRI) {
  Register DestReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});

  // If the described register is the destination, just return the source.
  if (DestReg == DescribedReg)
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);

  // If the described register is a sub-register of the destination register,
  // then pick out the source register's corresponding sub-register.
  if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) {
    unsigned SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr);
  }

  // The remaining case to consider is when the described register is a
  // super-register of the destination register. MOV8rr and MOV16rr do not
  // write to any of the other bytes in the register, meaning that we'd have to
  // describe the value using a combination of the source register and the
  // non-overlapping bits in the described register, which is not currently
  // possible.
  if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr ||
      !TRI->isSuperRegister(DestReg, DescribedReg))
    return None;

  assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case");
  return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
}

Optional<ParamLoadedValue>
X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
  const MachineOperand *Op = nullptr;
  DIExpression *Expr = nullptr;

  const TargetRegisterInfo *TRI = &getRegisterInfo();

  switch (MI.getOpcode()) {
  case X86::LEA32r:
  case X86::LEA64r:
  case X86::LEA64_32r: {
    // We may need to describe a 64-bit parameter with a 32-bit LEA.
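    // The loaded value is rebuilt as a DWARF expression over the base and
    // index operands; e.g. $rax = LEA64r $rdi, 2, $rsi, 8, $noreg describes
    // $rax as $rdi + $rsi * 2 + 8.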
    if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
      return None;

    // Operand 4 could be a global address. For now we do not support
    // such situations.
    if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm())
      return None;

    const MachineOperand &Op1 = MI.getOperand(1);
    const MachineOperand &Op2 = MI.getOperand(3);
    assert(Op2.isReg() && (Op2.getReg() == X86::NoRegister ||
                           Register::isPhysicalRegister(Op2.getReg())));

    // Omit situations like:
    // %rsi = lea %rsi, 4, ...
    if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) ||
        Op2.getReg() == MI.getOperand(0).getReg())
      return None;
    else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister &&
              TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) ||
             (Op2.getReg() != X86::NoRegister &&
              TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg())))
      return None;

    int64_t Coef = MI.getOperand(2).getImm();
    int64_t Offset = MI.getOperand(4).getImm();
    SmallVector<uint64_t, 8> Ops;

    if ((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI())
      Op = &Op1;

    if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) {
      Ops.push_back(dwarf::DW_OP_constu);
      Ops.push_back(Coef + 1);
      Ops.push_back(dwarf::DW_OP_mul);
    } else {
      if (Op && Op2.getReg() != X86::NoRegister) {
        int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false);
        if (dwarfReg < 0)
          return None;
        else if (dwarfReg < 32) {
          Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg);
          Ops.push_back(0);
        } else {
          Ops.push_back(dwarf::DW_OP_bregx);
          Ops.push_back(dwarfReg);
          Ops.push_back(0);
        }
      } else if (!Op) {
        assert(Op2.getReg() != X86::NoRegister);
        Op = &Op2;
      }

      if (Coef > 1) {
        assert(Op2.getReg() != X86::NoRegister);
        Ops.push_back(dwarf::DW_OP_constu);
        Ops.push_back(Coef);
        Ops.push_back(dwarf::DW_OP_mul);
      }

      if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) &&
          Op2.getReg() != X86::NoRegister) {
        Ops.push_back(dwarf::DW_OP_plus);
      }
    }

    DIExpression::appendOffset(Ops, Offset);
    Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), Ops);

    return ParamLoadedValue(*Op, Expr);
  }
  case X86::MOV8ri:
  case X86::MOV16ri:
    // TODO: Handle MOV8ri and MOV16ri.
    return None;
  case X86::MOV32ri:
  case X86::MOV64ri:
  case X86::MOV64ri32:
    // MOV32ri may be used for producing zero-extended 32-bit immediates in
    // 64-bit parameters, so we need to consider super-registers.
    if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
      return None;
    return ParamLoadedValue(MI.getOperand(1), Expr);
  case X86::MOV8rr:
  case X86::MOV16rr:
  case X86::MOV32rr:
  case X86::MOV64rr:
    return describeMOVrrLoadedValue(MI, Reg, TRI);
  case X86::XOR32rr: {
    // 64-bit parameters are zero-materialized using XOR32rr, so also consider
    // super-registers.
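    // E.g. $eax = XOR32rr $eax, $eax lets us describe both $eax and $rax as
    // the constant 0.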
8387 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
8388 return None;
8389 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
8390 return ParamLoadedValue(MachineOperand::CreateImm(0), Expr);
8391 return None;
8392 }
8393 case X86::MOVSX64rr32: {
8394 // We may need to describe the lower 32 bits of the MOVSX; for example, in
8395 // cases like this:
8396 //
8397 // $ebx = [...]
8398 // $rdi = MOVSX64rr32 $ebx
8399 // $esi = MOV32rr $edi
8400 if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg))
8401 return None;
8402
8403 Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
8404
8405 // If the described register is the destination register we need to
8406 // sign-extend the source register from 32 bits. The other case we handle
8407 // is when the described register is the 32-bit sub-register of the
8408 // destination register, in case we just need to return the source
8409 // register.
8410 if (Reg == MI.getOperand(0).getReg())
8411 Expr = DIExpression::appendExt(Expr, 32, 64, true);
8412 else
8413 assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) &&
8414 "Unhandled sub-register case for MOVSX64rr32");
8415
8416 return ParamLoadedValue(MI.getOperand(1), Expr);
8417 }
8418 default:
8419 assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction");
8420 return TargetInstrInfo::describeLoadedValue(MI, Reg);
8421 }
8422 }
8423
8424 /// This is an architecture-specific helper function of reassociateOps.
8425 /// Set special operand attributes for new instructions after reassociation.
setSpecialOperandAttr(MachineInstr & OldMI1,MachineInstr & OldMI2,MachineInstr & NewMI1,MachineInstr & NewMI2) const8426 void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
8427 MachineInstr &OldMI2,
8428 MachineInstr &NewMI1,
8429 MachineInstr &NewMI2) const {
8430 // Propagate FP flags from the original instructions.
8431 // But clear poison-generating flags because those may not be valid now.
8432 // TODO: There should be a helper function for copying only fast-math-flags.
8433 uint16_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags();
8434 NewMI1.setFlags(IntersectedFlags);
8435 NewMI1.clearFlag(MachineInstr::MIFlag::NoSWrap);
8436 NewMI1.clearFlag(MachineInstr::MIFlag::NoUWrap);
8437 NewMI1.clearFlag(MachineInstr::MIFlag::IsExact);
8438
8439 NewMI2.setFlags(IntersectedFlags);
8440 NewMI2.clearFlag(MachineInstr::MIFlag::NoSWrap);
8441 NewMI2.clearFlag(MachineInstr::MIFlag::NoUWrap);
8442 NewMI2.clearFlag(MachineInstr::MIFlag::IsExact);
8443
8444 // Integer instructions may define an implicit EFLAGS dest register operand.
8445 MachineOperand *OldFlagDef1 = OldMI1.findRegisterDefOperand(X86::EFLAGS);
8446 MachineOperand *OldFlagDef2 = OldMI2.findRegisterDefOperand(X86::EFLAGS);
8447
8448 assert(!OldFlagDef1 == !OldFlagDef2 &&
8449 "Unexpected instruction type for reassociation");
8450
8451 if (!OldFlagDef1 || !OldFlagDef2)
8452 return;
8453
8454 assert(OldFlagDef1->isDead() && OldFlagDef2->isDead() &&
8455 "Must have dead EFLAGS operand in reassociable instruction");
8456
8457 MachineOperand *NewFlagDef1 = NewMI1.findRegisterDefOperand(X86::EFLAGS);
8458 MachineOperand *NewFlagDef2 = NewMI2.findRegisterDefOperand(X86::EFLAGS);
8459
8460 assert(NewFlagDef1 && NewFlagDef2 &&
8461 "Unexpected operand in reassociable instruction");
8462
8463 // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations
8464 // of this pass or other passes. The EFLAGS operands must be dead in these new
8465 // instructions because the EFLAGS operands in the original instructions must
8466 // be dead in order for reassociation to occur.
8467 NewFlagDef1->setIsDead();
8468 NewFlagDef2->setIsDead();
8469 }
8470
8471 std::pair<unsigned, unsigned>
decomposeMachineOperandsTargetFlags(unsigned TF) const8472 X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
8473 return std::make_pair(TF, 0u);
8474 }
8475
8476 ArrayRef<std::pair<unsigned, const char *>>
getSerializableDirectMachineOperandTargetFlags() const8477 X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
8478 using namespace X86II;
8479 static const std::pair<unsigned, const char *> TargetFlags[] = {
8480 {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
8481 {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
8482 {MO_GOT, "x86-got"},
8483 {MO_GOTOFF, "x86-gotoff"},
8484 {MO_GOTPCREL, "x86-gotpcrel"},
8485 {MO_PLT, "x86-plt"},
8486 {MO_TLSGD, "x86-tlsgd"},
8487 {MO_TLSLD, "x86-tlsld"},
8488 {MO_TLSLDM, "x86-tlsldm"},
8489 {MO_GOTTPOFF, "x86-gottpoff"},
8490 {MO_INDNTPOFF, "x86-indntpoff"},
8491 {MO_TPOFF, "x86-tpoff"},
8492 {MO_DTPOFF, "x86-dtpoff"},
8493 {MO_NTPOFF, "x86-ntpoff"},
8494 {MO_GOTNTPOFF, "x86-gotntpoff"},
8495 {MO_DLLIMPORT, "x86-dllimport"},
8496 {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
8497 {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
8498 {MO_TLVP, "x86-tlvp"},
8499 {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
8500 {MO_SECREL, "x86-secrel"},
8501 {MO_COFFSTUB, "x86-coffstub"}};
8502 return makeArrayRef(TargetFlags);
8503 }
8504
8505 namespace {
8506 /// Create Global Base Reg pass. This initializes the PIC
8507 /// global base register for x86-32.
8508 struct CGBR : public MachineFunctionPass {
8509 static char ID;
CGBR__anonbbc25e450511::CGBR8510 CGBR() : MachineFunctionPass(ID) {}
8511
runOnMachineFunction__anonbbc25e450511::CGBR8512 bool runOnMachineFunction(MachineFunction &MF) override {
8513 const X86TargetMachine *TM =
8514 static_cast<const X86TargetMachine *>(&MF.getTarget());
8515 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
8516
8517 // Don't do anything in the 64-bit small and kernel code models. They use
8518 // RIP-relative addressing for everything.
8519 if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small ||
8520 TM->getCodeModel() == CodeModel::Kernel))
8521 return false;
8522
8523 // Only emit a global base reg in PIC mode.
8524 if (!TM->isPositionIndependent())
8525 return false;
8526
8527 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
8528 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
8529
8530 // If we didn't need a GlobalBaseReg, don't insert code.
8531 if (GlobalBaseReg == 0)
8532 return false;
8533
8534 // Insert the set of GlobalBaseReg into the first MBB of the function
8535 MachineBasicBlock &FirstMBB = MF.front();
8536 MachineBasicBlock::iterator MBBI = FirstMBB.begin();
8537 DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
8538 MachineRegisterInfo &RegInfo = MF.getRegInfo();
8539 const X86InstrInfo *TII = STI.getInstrInfo();
8540
8541 unsigned PC;
8542 if (STI.isPICStyleGOT())
8543 PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
8544 else
8545 PC = GlobalBaseReg;
8546
8547 if (STI.is64Bit()) {
8548 if (TM->getCodeModel() == CodeModel::Medium) {
8549 // In the medium code model, use a RIP-relative LEA to materialize the
8550 // GOT.
8551 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
8552 .addReg(X86::RIP)
8553 .addImm(0)
8554 .addReg(0)
8555 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
8556 .addReg(0);
8557 } else if (TM->getCodeModel() == CodeModel::Large) {
8558 // In the large code model, we are aiming for this code, though the
8559 // register allocation may vary:
8560 // leaq .LN$pb(%rip), %rax
8561 // movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx
8562 // addq %rcx, %rax
8563 // RAX now holds address of _GLOBAL_OFFSET_TABLE_.
8564 Register PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
8565 Register GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
8566 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg)
8567 .addReg(X86::RIP)
8568 .addImm(0)
8569 .addReg(0)
8570 .addSym(MF.getPICBaseSymbol())
8571 .addReg(0);
8572 std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol());
8573 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg)
8574 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
8575 X86II::MO_PIC_BASE_OFFSET);
8576 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC)
8577 .addReg(PBReg, RegState::Kill)
8578 .addReg(GOTReg, RegState::Kill);
8579 } else {
8580 llvm_unreachable("unexpected code model");
8581 }
8582 } else {
8583 // Operand of MovePCtoStack is completely ignored by asm printer. It's
8584 // only used in JIT code emission as displacement to pc.
8585 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
8586
8587 // If we're using vanilla 'GOT' PIC style, we should use relative
8588 // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
8589 if (STI.isPICStyleGOT()) {
8590 // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
8591 // %some_register
8592 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
8593 .addReg(PC)
8594 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
8595 X86II::MO_GOT_ABSOLUTE_ADDRESS);
8596 }
8597 }
8598
8599 return true;
8600 }
8601
getPassName__anonbbc25e450511::CGBR8602 StringRef getPassName() const override {
8603 return "X86 PIC Global Base Reg Initialization";
8604 }
8605
getAnalysisUsage__anonbbc25e450511::CGBR8606 void getAnalysisUsage(AnalysisUsage &AU) const override {
8607 AU.setPreservesCFG();
8608 MachineFunctionPass::getAnalysisUsage(AU);
8609 }
8610 };
8611 }
8612
8613 char CGBR::ID = 0;
8614 FunctionPass*
createX86GlobalBaseRegPass()8615 llvm::createX86GlobalBaseRegPass() { return new CGBR(); }
8616
8617 namespace {
8618 struct LDTLSCleanup : public MachineFunctionPass {
8619 static char ID;
LDTLSCleanup__anonbbc25e450611::LDTLSCleanup8620 LDTLSCleanup() : MachineFunctionPass(ID) {}
8621
runOnMachineFunction__anonbbc25e450611::LDTLSCleanup8622 bool runOnMachineFunction(MachineFunction &MF) override {
8623 if (skipFunction(MF.getFunction()))
8624 return false;
8625
8626 X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
8627 if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
8628 // No point folding accesses if there isn't at least two.
8629 return false;
8630 }
8631
8632 MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
8633 return VisitNode(DT->getRootNode(), 0);
8634 }
8635
8636 // Visit the dominator subtree rooted at Node in pre-order.
8637 // If TLSBaseAddrReg is non-null, then use that to replace any
8638 // TLS_base_addr instructions. Otherwise, create the register
8639 // when the first such instruction is seen, and then use it
8640 // as we encounter more instructions.
VisitNode__anonbbc25e450611::LDTLSCleanup8641 bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
8642 MachineBasicBlock *BB = Node->getBlock();
8643 bool Changed = false;
8644
8645 // Traverse the current block.
8646 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
8647 ++I) {
8648 switch (I->getOpcode()) {
8649 case X86::TLS_base_addr32:
8650 case X86::TLS_base_addr64:
8651 if (TLSBaseAddrReg)
8652 I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
8653 else
8654 I = SetRegister(*I, &TLSBaseAddrReg);
8655 Changed = true;
8656 break;
8657 default:
8658 break;
8659 }
8660 }
8661
8662 // Visit the children of this block in the dominator tree.
8663 for (auto I = Node->begin(), E = Node->end(); I != E; ++I) {
8664 Changed |= VisitNode(*I, TLSBaseAddrReg);
8665 }
8666
8667 return Changed;
8668 }
8669
8670 // Replace the TLS_base_addr instruction I with a copy from
8671 // TLSBaseAddrReg, returning the new instruction.
ReplaceTLSBaseAddrCall__anonbbc25e450611::LDTLSCleanup8672 MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
8673 unsigned TLSBaseAddrReg) {
8674 MachineFunction *MF = I.getParent()->getParent();
8675 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
8676 const bool is64Bit = STI.is64Bit();
8677 const X86InstrInfo *TII = STI.getInstrInfo();
8678
8679 // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
8680 MachineInstr *Copy =
8681 BuildMI(*I.getParent(), I, I.getDebugLoc(),
8682 TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
8683 .addReg(TLSBaseAddrReg);
8684
8685 // Erase the TLS_base_addr instruction.
8686 I.eraseFromParent();
8687
8688 return Copy;
8689 }
8690
8691 // Create a virtual register in *TLSBaseAddrReg, and populate it by
8692 // inserting a copy instruction after I. Returns the new instruction.
SetRegister__anonbbc25e450611::LDTLSCleanup8693 MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
8694 MachineFunction *MF = I.getParent()->getParent();
8695 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
8696 const bool is64Bit = STI.is64Bit();
8697 const X86InstrInfo *TII = STI.getInstrInfo();
8698
8699 // Create a virtual register for the TLS base address.
8700 MachineRegisterInfo &RegInfo = MF->getRegInfo();
8701 *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
8702 ? &X86::GR64RegClass
8703 : &X86::GR32RegClass);
8704
8705 // Insert a copy from RAX/EAX to TLSBaseAddrReg.
8706 MachineInstr *Next = I.getNextNode();
8707 MachineInstr *Copy =
8708 BuildMI(*I.getParent(), Next, I.getDebugLoc(),
8709 TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
8710 .addReg(is64Bit ? X86::RAX : X86::EAX);
8711
8712 return Copy;
8713 }
8714
getPassName__anonbbc25e450611::LDTLSCleanup8715 StringRef getPassName() const override {
8716 return "Local Dynamic TLS Access Clean-up";
8717 }
8718
getAnalysisUsage__anonbbc25e450611::LDTLSCleanup8719 void getAnalysisUsage(AnalysisUsage &AU) const override {
8720 AU.setPreservesCFG();
8721 AU.addRequired<MachineDominatorTree>();
8722 MachineFunctionPass::getAnalysisUsage(AU);
8723 }
8724 };
8725 }
8726
8727 char LDTLSCleanup::ID = 0;
8728 FunctionPass*
createCleanupLocalDynamicTLSPass()8729 llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
8730
8731 /// Constants defining how certain sequences should be outlined.
8732 ///
8733 /// \p MachineOutlinerDefault implies that the function is called with a call
8734 /// instruction, and a return must be emitted for the outlined function frame.
8735 ///
8736 /// That is,
8737 ///
8738 /// I1 OUTLINED_FUNCTION:
8739 /// I2 --> call OUTLINED_FUNCTION I1
8740 /// I3 I2
8741 /// I3
8742 /// ret
8743 ///
8744 /// * Call construction overhead: 1 (call instruction)
8745 /// * Frame construction overhead: 1 (return instruction)
8746 ///
8747 /// \p MachineOutlinerTailCall implies that the function is being tail called.
8748 /// A jump is emitted instead of a call, and the return is already present in
8749 /// the outlined sequence. That is,
8750 ///
8751 /// I1 OUTLINED_FUNCTION:
8752 /// I2 --> jmp OUTLINED_FUNCTION I1
8753 /// ret I2
8754 /// ret
8755 ///
8756 /// * Call construction overhead: 1 (jump instruction)
8757 /// * Frame construction overhead: 0 (don't need to return)
8758 ///
8759 enum MachineOutlinerClass {
8760 MachineOutlinerDefault,
8761 MachineOutlinerTailCall
8762 };
8763
getOutliningCandidateInfo(std::vector<outliner::Candidate> & RepeatedSequenceLocs) const8764 outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo(
8765 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
8766 unsigned SequenceSize =
8767 std::accumulate(RepeatedSequenceLocs[0].front(),
8768 std::next(RepeatedSequenceLocs[0].back()), 0,
8769 [](unsigned Sum, const MachineInstr &MI) {
8770 // FIXME: x86 doesn't implement getInstSizeInBytes, so
8771 // we can't tell the cost. Just assume each instruction
8772 // is one byte.
8773 if (MI.isDebugInstr() || MI.isKill())
8774 return Sum;
8775 return Sum + 1;
8776 });
8777
8778 // We check to see if CFI Instructions are present, and if they are
8779 // we find the number of CFI Instructions in the candidates.
8780 unsigned CFICount = 0;
8781 MachineBasicBlock::iterator MBBI = RepeatedSequenceLocs[0].front();
8782 for (unsigned Loc = RepeatedSequenceLocs[0].getStartIdx();
8783 Loc < RepeatedSequenceLocs[0].getEndIdx() + 1; Loc++) {
8784 const std::vector<MCCFIInstruction> &CFIInstructions =
8785 RepeatedSequenceLocs[0].getMF()->getFrameInstructions();
8786 if (MBBI->isCFIInstruction()) {
8787 unsigned CFIIndex = MBBI->getOperand(0).getCFIIndex();
8788 MCCFIInstruction CFI = CFIInstructions[CFIIndex];
8789 CFICount++;
8790 }
8791 MBBI++;
8792 }
8793
8794 // We compare the number of found CFI Instructions to the number of CFI
8795 // instructions in the parent function for each candidate. We must check this
8796 // since if we outline one of the CFI instructions in a function, we have to
8797 // outline them all for correctness. If we do not, the address offsets will be
8798 // incorrect between the two sections of the program.
8799 for (outliner::Candidate &C : RepeatedSequenceLocs) {
8800 std::vector<MCCFIInstruction> CFIInstructions =
8801 C.getMF()->getFrameInstructions();
8802
8803 if (CFICount > 0 && CFICount != CFIInstructions.size())
8804 return outliner::OutlinedFunction();
8805 }
8806
8807 // FIXME: Use real size in bytes for call and ret instructions.
8808 if (RepeatedSequenceLocs[0].back()->isTerminator()) {
8809 for (outliner::Candidate &C : RepeatedSequenceLocs)
8810 C.setCallInfo(MachineOutlinerTailCall, 1);
8811
8812 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
8813 0, // Number of bytes to emit frame.
8814 MachineOutlinerTailCall // Type of frame.
8815 );
8816 }
8817
8818 if (CFICount > 0)
8819 return outliner::OutlinedFunction();
8820
8821 for (outliner::Candidate &C : RepeatedSequenceLocs)
8822 C.setCallInfo(MachineOutlinerDefault, 1);
8823
8824 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1,
8825 MachineOutlinerDefault);
8826 }
8827
isFunctionSafeToOutlineFrom(MachineFunction & MF,bool OutlineFromLinkOnceODRs) const8828 bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
8829 bool OutlineFromLinkOnceODRs) const {
8830 const Function &F = MF.getFunction();
8831
8832 // Does the function use a red zone? If it does, then we can't risk messing
8833 // with the stack.
8834 if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
8835 // It could have a red zone. If it does, then we don't want to touch it.
8836 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
8837 if (!X86FI || X86FI->getUsesRedZone())
8838 return false;
8839 }
8840
8841 // If we *don't* want to outline from things that could potentially be deduped
8842 // then return false.
8843 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
8844 return false;
8845
8846 // This function is viable for outlining, so return true.
8847 return true;
8848 }
8849
8850 outliner::InstrType
getOutliningType(MachineBasicBlock::iterator & MIT,unsigned Flags) const8851 X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
  MachineInstr &MI = *MIT;
  // Don't allow debug values to impact outlining type.
  if (MI.isDebugInstr() || MI.isIndirectDebugValue())
    return outliner::InstrType::Invisible;

  // At this point, KILL instructions don't really tell us much, so we can
  // skip over them.
  if (MI.isKill())
    return outliner::InstrType::Invisible;

  // Is this a tail call? If yes, we can outline as a tail call.
  if (isTailCall(MI))
    return outliner::InstrType::Legal;

  // Is this the terminator of a basic block?
  if (MI.isTerminator() || MI.isReturn()) {

    // Does its parent have any successors in its MachineFunction?
    if (MI.getParent()->succ_empty())
      return outliner::InstrType::Legal;

    // It does, so we can't tail call it.
    return outliner::InstrType::Illegal;
  }

  // Don't outline anything that modifies or reads from the stack pointer.
  //
  // FIXME: There are instructions which are being manually built without
  // explicit uses/defs so we also have to check the MCInstrDesc. We should be
  // able to remove the extra checks once those are fixed up. For example,
  // sometimes we might get something like %rax = POP64r 1. This won't be
  // caught by modifiesRegister or readsRegister even though the instruction
  // really ought to be formed so that modifiesRegister/readsRegister would
  // catch it.
  if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
      MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
      MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
    return outliner::InstrType::Illegal;

  // Outlined calls change the instruction pointer, so don't read from it.
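  // (The outlined sequence executes at a different address than the code it
  // replaces, so an instruction that reads RIP would observe a different
  // value after outlining.)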
  if (MI.readsRegister(X86::RIP, &RI) ||
      MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
      MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
    return outliner::InstrType::Illegal;

  // Positions (e.g. labels) can't safely be outlined.
  if (MI.isPosition())
    return outliner::InstrType::Illegal;

  // Make sure none of the operands of this instruction do anything tricky.
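  // (Constant-pool, jump-table, and frame indices are local to the original
  // function, so they would not resolve correctly if the instruction were
  // moved into an outlined function.)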
  for (const MachineOperand &MOP : MI.operands())
    if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
        MOP.isTargetIndex())
      return outliner::InstrType::Illegal;

  return outliner::InstrType::Legal;
}

void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB,
                                      MachineFunction &MF,
                                      const outliner::OutlinedFunction &OF)
    const {
  // If we're a tail call, we already have a return, so don't do anything.
  if (OF.FrameConstructionID == MachineOutlinerTailCall)
    return;

  // We're a normal call, so our sequence doesn't have a return instruction.
  // Add it in.
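  // The outlined function then ends in a return, e.g.:
  //   OUTLINED_FUNCTION_0:
  //     <outlined sequence>
  //     retq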
  MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ));
  MBB.insert(MBB.end(), retq);
}

MachineBasicBlock::iterator
X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator &It,
                                 MachineFunction &MF,
                                 const outliner::Candidate &C) const {
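  // In both cases below, MF is the outlined function, so looking up
  // MF.getName() in the module yields the global value to branch or call to.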
  // Is it a tail call?
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    // Yes, just insert a JMP.
    It = MBB.insert(It,
                    BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
                        .addGlobalAddress(M.getNamedValue(MF.getName())));
  } else {
    // No, insert a call.
    It = MBB.insert(It,
                    BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
                        .addGlobalAddress(M.getNamedValue(MF.getName())));
  }

  return It;
}

#define GET_INSTRINFO_HELPERS
#include "X86GenInstrInfo.inc"