1 //===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the X86 implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "X86InstrInfo.h"
14 #include "X86.h"
15 #include "X86InstrBuilder.h"
16 #include "X86InstrFoldTables.h"
17 #include "X86MachineFunctionInfo.h"
18 #include "X86Subtarget.h"
19 #include "X86TargetMachine.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/Sequence.h"
22 #include "llvm/CodeGen/LivePhysRegs.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineConstantPool.h"
25 #include "llvm/CodeGen/MachineDominators.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineModuleInfo.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/StackMaps.h"
31 #include "llvm/IR/DebugInfoMetadata.h"
32 #include "llvm/IR/DerivedTypes.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/MC/MCAsmInfo.h"
35 #include "llvm/MC/MCExpr.h"
36 #include "llvm/MC/MCInst.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/Target/TargetOptions.h"
42
43 using namespace llvm;
44
45 #define DEBUG_TYPE "x86-instr-info"
46
47 #define GET_INSTRINFO_CTOR_DTOR
48 #include "X86GenInstrInfo.inc"
49
50 static cl::opt<bool>
51 NoFusing("disable-spill-fusing",
52 cl::desc("Disable fusing of spill code into instructions"),
53 cl::Hidden);
54 static cl::opt<bool>
55 PrintFailedFusing("print-failed-fuse-candidates",
56 cl::desc("Print instructions that the allocator wants to"
57 " fuse, but the X86 backend currently can't"),
58 cl::Hidden);
59 static cl::opt<bool>
60 ReMatPICStubLoad("remat-pic-stub-load",
61 cl::desc("Re-materialize load from stub in PIC mode"),
62 cl::init(false), cl::Hidden);
63 static cl::opt<unsigned>
64 PartialRegUpdateClearance("partial-reg-update-clearance",
65 cl::desc("Clearance between two register writes "
66 "for inserting XOR to avoid partial "
67 "register update"),
68 cl::init(64), cl::Hidden);
69 static cl::opt<unsigned>
70 UndefRegClearance("undef-reg-clearance",
71 cl::desc("How many idle instructions we would like before "
72 "certain undef register reads"),
73 cl::init(128), cl::Hidden);
74
75
76 // Pin the vtable to this file.
anchor()77 void X86InstrInfo::anchor() {}
78
X86InstrInfo(X86Subtarget & STI)79 X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
80 : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
81 : X86::ADJCALLSTACKDOWN32),
82 (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
83 : X86::ADJCALLSTACKUP32),
84 X86::CATCHRET,
85 (STI.is64Bit() ? X86::RETQ : X86::RETL)),
86 Subtarget(STI), RI(STI.getTargetTriple()) {
87 }
88
89 bool
isCoalescableExtInstr(const MachineInstr & MI,Register & SrcReg,Register & DstReg,unsigned & SubIdx) const90 X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
91 Register &SrcReg, Register &DstReg,
92 unsigned &SubIdx) const {
93 switch (MI.getOpcode()) {
94 default: break;
95 case X86::MOVSX16rr8:
96 case X86::MOVZX16rr8:
97 case X86::MOVSX32rr8:
98 case X86::MOVZX32rr8:
99 case X86::MOVSX64rr8:
100 if (!Subtarget.is64Bit())
101 // It's not always legal to reference the low 8-bit of the larger
102 // register in 32-bit mode.
103 return false;
104 LLVM_FALLTHROUGH;
105 case X86::MOVSX32rr16:
106 case X86::MOVZX32rr16:
107 case X86::MOVSX64rr16:
108 case X86::MOVSX64rr32: {
109 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
110 // Be conservative.
111 return false;
112 SrcReg = MI.getOperand(1).getReg();
113 DstReg = MI.getOperand(0).getReg();
114 switch (MI.getOpcode()) {
115 default: llvm_unreachable("Unreachable!");
116 case X86::MOVSX16rr8:
117 case X86::MOVZX16rr8:
118 case X86::MOVSX32rr8:
119 case X86::MOVZX32rr8:
120 case X86::MOVSX64rr8:
121 SubIdx = X86::sub_8bit;
122 break;
123 case X86::MOVSX32rr16:
124 case X86::MOVZX32rr16:
125 case X86::MOVSX64rr16:
126 SubIdx = X86::sub_16bit;
127 break;
128 case X86::MOVSX64rr32:
129 SubIdx = X86::sub_32bit;
130 break;
131 }
132 return true;
133 }
134 }
135 return false;
136 }
137
isDataInvariant(MachineInstr & MI)138 bool X86InstrInfo::isDataInvariant(MachineInstr &MI) {
139 switch (MI.getOpcode()) {
140 default:
141 // By default, assume that the instruction is not data invariant.
142 return false;
143
144 // Some target-independent operations that trivially lower to data-invariant
145 // instructions.
146 case TargetOpcode::COPY:
147 case TargetOpcode::INSERT_SUBREG:
148 case TargetOpcode::SUBREG_TO_REG:
149 return true;
150
151 // On x86 it is believed that imul is constant time w.r.t. the loaded data.
152 // However, they set flags and are perhaps the most surprisingly constant
153 // time operations so we call them out here separately.
154 case X86::IMUL16rr:
155 case X86::IMUL16rri8:
156 case X86::IMUL16rri:
157 case X86::IMUL32rr:
158 case X86::IMUL32rri8:
159 case X86::IMUL32rri:
160 case X86::IMUL64rr:
161 case X86::IMUL64rri32:
162 case X86::IMUL64rri8:
163
164 // Bit scanning and counting instructions that are somewhat surprisingly
165 // constant time as they scan across bits and do other fairly complex
166 // operations like popcnt, but are believed to be constant time on x86.
167 // However, these set flags.
168 case X86::BSF16rr:
169 case X86::BSF32rr:
170 case X86::BSF64rr:
171 case X86::BSR16rr:
172 case X86::BSR32rr:
173 case X86::BSR64rr:
174 case X86::LZCNT16rr:
175 case X86::LZCNT32rr:
176 case X86::LZCNT64rr:
177 case X86::POPCNT16rr:
178 case X86::POPCNT32rr:
179 case X86::POPCNT64rr:
180 case X86::TZCNT16rr:
181 case X86::TZCNT32rr:
182 case X86::TZCNT64rr:
183
184 // Bit manipulation instructions are effectively combinations of basic
185 // arithmetic ops, and should still execute in constant time. These also
186 // set flags.
187 case X86::BLCFILL32rr:
188 case X86::BLCFILL64rr:
189 case X86::BLCI32rr:
190 case X86::BLCI64rr:
191 case X86::BLCIC32rr:
192 case X86::BLCIC64rr:
193 case X86::BLCMSK32rr:
194 case X86::BLCMSK64rr:
195 case X86::BLCS32rr:
196 case X86::BLCS64rr:
197 case X86::BLSFILL32rr:
198 case X86::BLSFILL64rr:
199 case X86::BLSI32rr:
200 case X86::BLSI64rr:
201 case X86::BLSIC32rr:
202 case X86::BLSIC64rr:
203 case X86::BLSMSK32rr:
204 case X86::BLSMSK64rr:
205 case X86::BLSR32rr:
206 case X86::BLSR64rr:
207 case X86::TZMSK32rr:
208 case X86::TZMSK64rr:
209
210 // Bit extracting and clearing instructions should execute in constant time,
211 // and set flags.
212 case X86::BEXTR32rr:
213 case X86::BEXTR64rr:
214 case X86::BEXTRI32ri:
215 case X86::BEXTRI64ri:
216 case X86::BZHI32rr:
217 case X86::BZHI64rr:
218
219 // Shift and rotate.
220 case X86::ROL8r1:
221 case X86::ROL16r1:
222 case X86::ROL32r1:
223 case X86::ROL64r1:
224 case X86::ROL8rCL:
225 case X86::ROL16rCL:
226 case X86::ROL32rCL:
227 case X86::ROL64rCL:
228 case X86::ROL8ri:
229 case X86::ROL16ri:
230 case X86::ROL32ri:
231 case X86::ROL64ri:
232 case X86::ROR8r1:
233 case X86::ROR16r1:
234 case X86::ROR32r1:
235 case X86::ROR64r1:
236 case X86::ROR8rCL:
237 case X86::ROR16rCL:
238 case X86::ROR32rCL:
239 case X86::ROR64rCL:
240 case X86::ROR8ri:
241 case X86::ROR16ri:
242 case X86::ROR32ri:
243 case X86::ROR64ri:
244 case X86::SAR8r1:
245 case X86::SAR16r1:
246 case X86::SAR32r1:
247 case X86::SAR64r1:
248 case X86::SAR8rCL:
249 case X86::SAR16rCL:
250 case X86::SAR32rCL:
251 case X86::SAR64rCL:
252 case X86::SAR8ri:
253 case X86::SAR16ri:
254 case X86::SAR32ri:
255 case X86::SAR64ri:
256 case X86::SHL8r1:
257 case X86::SHL16r1:
258 case X86::SHL32r1:
259 case X86::SHL64r1:
260 case X86::SHL8rCL:
261 case X86::SHL16rCL:
262 case X86::SHL32rCL:
263 case X86::SHL64rCL:
264 case X86::SHL8ri:
265 case X86::SHL16ri:
266 case X86::SHL32ri:
267 case X86::SHL64ri:
268 case X86::SHR8r1:
269 case X86::SHR16r1:
270 case X86::SHR32r1:
271 case X86::SHR64r1:
272 case X86::SHR8rCL:
273 case X86::SHR16rCL:
274 case X86::SHR32rCL:
275 case X86::SHR64rCL:
276 case X86::SHR8ri:
277 case X86::SHR16ri:
278 case X86::SHR32ri:
279 case X86::SHR64ri:
280 case X86::SHLD16rrCL:
281 case X86::SHLD32rrCL:
282 case X86::SHLD64rrCL:
283 case X86::SHLD16rri8:
284 case X86::SHLD32rri8:
285 case X86::SHLD64rri8:
286 case X86::SHRD16rrCL:
287 case X86::SHRD32rrCL:
288 case X86::SHRD64rrCL:
289 case X86::SHRD16rri8:
290 case X86::SHRD32rri8:
291 case X86::SHRD64rri8:
292
293 // Basic arithmetic is constant time on the input but does set flags.
294 case X86::ADC8rr:
295 case X86::ADC8ri:
296 case X86::ADC16rr:
297 case X86::ADC16ri:
298 case X86::ADC16ri8:
299 case X86::ADC32rr:
300 case X86::ADC32ri:
301 case X86::ADC32ri8:
302 case X86::ADC64rr:
303 case X86::ADC64ri8:
304 case X86::ADC64ri32:
305 case X86::ADD8rr:
306 case X86::ADD8ri:
307 case X86::ADD16rr:
308 case X86::ADD16ri:
309 case X86::ADD16ri8:
310 case X86::ADD32rr:
311 case X86::ADD32ri:
312 case X86::ADD32ri8:
313 case X86::ADD64rr:
314 case X86::ADD64ri8:
315 case X86::ADD64ri32:
316 case X86::AND8rr:
317 case X86::AND8ri:
318 case X86::AND16rr:
319 case X86::AND16ri:
320 case X86::AND16ri8:
321 case X86::AND32rr:
322 case X86::AND32ri:
323 case X86::AND32ri8:
324 case X86::AND64rr:
325 case X86::AND64ri8:
326 case X86::AND64ri32:
327 case X86::OR8rr:
328 case X86::OR8ri:
329 case X86::OR16rr:
330 case X86::OR16ri:
331 case X86::OR16ri8:
332 case X86::OR32rr:
333 case X86::OR32ri:
334 case X86::OR32ri8:
335 case X86::OR64rr:
336 case X86::OR64ri8:
337 case X86::OR64ri32:
338 case X86::SBB8rr:
339 case X86::SBB8ri:
340 case X86::SBB16rr:
341 case X86::SBB16ri:
342 case X86::SBB16ri8:
343 case X86::SBB32rr:
344 case X86::SBB32ri:
345 case X86::SBB32ri8:
346 case X86::SBB64rr:
347 case X86::SBB64ri8:
348 case X86::SBB64ri32:
349 case X86::SUB8rr:
350 case X86::SUB8ri:
351 case X86::SUB16rr:
352 case X86::SUB16ri:
353 case X86::SUB16ri8:
354 case X86::SUB32rr:
355 case X86::SUB32ri:
356 case X86::SUB32ri8:
357 case X86::SUB64rr:
358 case X86::SUB64ri8:
359 case X86::SUB64ri32:
360 case X86::XOR8rr:
361 case X86::XOR8ri:
362 case X86::XOR16rr:
363 case X86::XOR16ri:
364 case X86::XOR16ri8:
365 case X86::XOR32rr:
366 case X86::XOR32ri:
367 case X86::XOR32ri8:
368 case X86::XOR64rr:
369 case X86::XOR64ri8:
370 case X86::XOR64ri32:
371 // Arithmetic with just 32-bit and 64-bit variants and no immediates.
372 case X86::ADCX32rr:
373 case X86::ADCX64rr:
374 case X86::ADOX32rr:
375 case X86::ADOX64rr:
376 case X86::ANDN32rr:
377 case X86::ANDN64rr:
378 // Unary arithmetic operations.
379 case X86::DEC8r:
380 case X86::DEC16r:
381 case X86::DEC32r:
382 case X86::DEC64r:
383 case X86::INC8r:
384 case X86::INC16r:
385 case X86::INC32r:
386 case X86::INC64r:
387 case X86::NEG8r:
388 case X86::NEG16r:
389 case X86::NEG32r:
390 case X86::NEG64r:
391
392 // Unlike other arithmetic, NOT doesn't set EFLAGS.
393 case X86::NOT8r:
394 case X86::NOT16r:
395 case X86::NOT32r:
396 case X86::NOT64r:
397
398 // Various move instructions used to zero or sign extend things. Note that we
399 // intentionally don't support the _NOREX variants as we can't handle that
400 // register constraint anyways.
401 case X86::MOVSX16rr8:
402 case X86::MOVSX32rr8:
403 case X86::MOVSX32rr16:
404 case X86::MOVSX64rr8:
405 case X86::MOVSX64rr16:
406 case X86::MOVSX64rr32:
407 case X86::MOVZX16rr8:
408 case X86::MOVZX32rr8:
409 case X86::MOVZX32rr16:
410 case X86::MOVZX64rr8:
411 case X86::MOVZX64rr16:
412 case X86::MOV32rr:
413
414 // Arithmetic instructions that are both constant time and don't set flags.
415 case X86::RORX32ri:
416 case X86::RORX64ri:
417 case X86::SARX32rr:
418 case X86::SARX64rr:
419 case X86::SHLX32rr:
420 case X86::SHLX64rr:
421 case X86::SHRX32rr:
422 case X86::SHRX64rr:
423
424 // LEA doesn't actually access memory, and its arithmetic is constant time.
425 case X86::LEA16r:
426 case X86::LEA32r:
427 case X86::LEA64_32r:
428 case X86::LEA64r:
429 return true;
430 }
431 }
432
isDataInvariantLoad(MachineInstr & MI)433 bool X86InstrInfo::isDataInvariantLoad(MachineInstr &MI) {
434 switch (MI.getOpcode()) {
435 default:
436 // By default, assume that the load will immediately leak.
437 return false;
438
439 // On x86 it is believed that imul is constant time w.r.t. the loaded data.
440 // However, they set flags and are perhaps the most surprisingly constant
441 // time operations so we call them out here separately.
442 case X86::IMUL16rm:
443 case X86::IMUL16rmi8:
444 case X86::IMUL16rmi:
445 case X86::IMUL32rm:
446 case X86::IMUL32rmi8:
447 case X86::IMUL32rmi:
448 case X86::IMUL64rm:
449 case X86::IMUL64rmi32:
450 case X86::IMUL64rmi8:
451
452 // Bit scanning and counting instructions that are somewhat surprisingly
453 // constant time as they scan across bits and do other fairly complex
454 // operations like popcnt, but are believed to be constant time on x86.
455 // However, these set flags.
456 case X86::BSF16rm:
457 case X86::BSF32rm:
458 case X86::BSF64rm:
459 case X86::BSR16rm:
460 case X86::BSR32rm:
461 case X86::BSR64rm:
462 case X86::LZCNT16rm:
463 case X86::LZCNT32rm:
464 case X86::LZCNT64rm:
465 case X86::POPCNT16rm:
466 case X86::POPCNT32rm:
467 case X86::POPCNT64rm:
468 case X86::TZCNT16rm:
469 case X86::TZCNT32rm:
470 case X86::TZCNT64rm:
471
472 // Bit manipulation instructions are effectively combinations of basic
473 // arithmetic ops, and should still execute in constant time. These also
474 // set flags.
475 case X86::BLCFILL32rm:
476 case X86::BLCFILL64rm:
477 case X86::BLCI32rm:
478 case X86::BLCI64rm:
479 case X86::BLCIC32rm:
480 case X86::BLCIC64rm:
481 case X86::BLCMSK32rm:
482 case X86::BLCMSK64rm:
483 case X86::BLCS32rm:
484 case X86::BLCS64rm:
485 case X86::BLSFILL32rm:
486 case X86::BLSFILL64rm:
487 case X86::BLSI32rm:
488 case X86::BLSI64rm:
489 case X86::BLSIC32rm:
490 case X86::BLSIC64rm:
491 case X86::BLSMSK32rm:
492 case X86::BLSMSK64rm:
493 case X86::BLSR32rm:
494 case X86::BLSR64rm:
495 case X86::TZMSK32rm:
496 case X86::TZMSK64rm:
497
498 // Bit extracting and clearing instructions should execute in constant time,
499 // and set flags.
500 case X86::BEXTR32rm:
501 case X86::BEXTR64rm:
502 case X86::BEXTRI32mi:
503 case X86::BEXTRI64mi:
504 case X86::BZHI32rm:
505 case X86::BZHI64rm:
506
507 // Basic arithmetic is constant time on the input but does set flags.
508 case X86::ADC8rm:
509 case X86::ADC16rm:
510 case X86::ADC32rm:
511 case X86::ADC64rm:
512 case X86::ADCX32rm:
513 case X86::ADCX64rm:
514 case X86::ADD8rm:
515 case X86::ADD16rm:
516 case X86::ADD32rm:
517 case X86::ADD64rm:
518 case X86::ADOX32rm:
519 case X86::ADOX64rm:
520 case X86::AND8rm:
521 case X86::AND16rm:
522 case X86::AND32rm:
523 case X86::AND64rm:
524 case X86::ANDN32rm:
525 case X86::ANDN64rm:
526 case X86::OR8rm:
527 case X86::OR16rm:
528 case X86::OR32rm:
529 case X86::OR64rm:
530 case X86::SBB8rm:
531 case X86::SBB16rm:
532 case X86::SBB32rm:
533 case X86::SBB64rm:
534 case X86::SUB8rm:
535 case X86::SUB16rm:
536 case X86::SUB32rm:
537 case X86::SUB64rm:
538 case X86::XOR8rm:
539 case X86::XOR16rm:
540 case X86::XOR32rm:
541 case X86::XOR64rm:
542
543 // Integer multiply w/o affecting flags is still believed to be constant
544 // time on x86. Called out separately as this is among the most surprising
545 // instructions to exhibit that behavior.
546 case X86::MULX32rm:
547 case X86::MULX64rm:
548
549 // Arithmetic instructions that are both constant time and don't set flags.
550 case X86::RORX32mi:
551 case X86::RORX64mi:
552 case X86::SARX32rm:
553 case X86::SARX64rm:
554 case X86::SHLX32rm:
555 case X86::SHLX64rm:
556 case X86::SHRX32rm:
557 case X86::SHRX64rm:
558
559 // Conversions are believed to be constant time and don't set flags.
560 case X86::CVTTSD2SI64rm:
561 case X86::VCVTTSD2SI64rm:
562 case X86::VCVTTSD2SI64Zrm:
563 case X86::CVTTSD2SIrm:
564 case X86::VCVTTSD2SIrm:
565 case X86::VCVTTSD2SIZrm:
566 case X86::CVTTSS2SI64rm:
567 case X86::VCVTTSS2SI64rm:
568 case X86::VCVTTSS2SI64Zrm:
569 case X86::CVTTSS2SIrm:
570 case X86::VCVTTSS2SIrm:
571 case X86::VCVTTSS2SIZrm:
572 case X86::CVTSI2SDrm:
573 case X86::VCVTSI2SDrm:
574 case X86::VCVTSI2SDZrm:
575 case X86::CVTSI2SSrm:
576 case X86::VCVTSI2SSrm:
577 case X86::VCVTSI2SSZrm:
578 case X86::CVTSI642SDrm:
579 case X86::VCVTSI642SDrm:
580 case X86::VCVTSI642SDZrm:
581 case X86::CVTSI642SSrm:
582 case X86::VCVTSI642SSrm:
583 case X86::VCVTSI642SSZrm:
584 case X86::CVTSS2SDrm:
585 case X86::VCVTSS2SDrm:
586 case X86::VCVTSS2SDZrm:
587 case X86::CVTSD2SSrm:
588 case X86::VCVTSD2SSrm:
589 case X86::VCVTSD2SSZrm:
590 // AVX512 added unsigned integer conversions.
591 case X86::VCVTTSD2USI64Zrm:
592 case X86::VCVTTSD2USIZrm:
593 case X86::VCVTTSS2USI64Zrm:
594 case X86::VCVTTSS2USIZrm:
595 case X86::VCVTUSI2SDZrm:
596 case X86::VCVTUSI642SDZrm:
597 case X86::VCVTUSI2SSZrm:
598 case X86::VCVTUSI642SSZrm:
599
600 // Loads to register don't set flags.
601 case X86::MOV8rm:
602 case X86::MOV8rm_NOREX:
603 case X86::MOV16rm:
604 case X86::MOV32rm:
605 case X86::MOV64rm:
606 case X86::MOVSX16rm8:
607 case X86::MOVSX32rm16:
608 case X86::MOVSX32rm8:
609 case X86::MOVSX32rm8_NOREX:
610 case X86::MOVSX64rm16:
611 case X86::MOVSX64rm32:
612 case X86::MOVSX64rm8:
613 case X86::MOVZX16rm8:
614 case X86::MOVZX32rm16:
615 case X86::MOVZX32rm8:
616 case X86::MOVZX32rm8_NOREX:
617 case X86::MOVZX64rm16:
618 case X86::MOVZX64rm8:
619 return true;
620 }
621 }
622
getSPAdjust(const MachineInstr & MI) const623 int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
624 const MachineFunction *MF = MI.getParent()->getParent();
625 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
626
627 if (isFrameInstr(MI)) {
628 int SPAdj = alignTo(getFrameSize(MI), TFI->getStackAlign());
629 SPAdj -= getFrameAdjustment(MI);
630 if (!isFrameSetup(MI))
631 SPAdj = -SPAdj;
632 return SPAdj;
633 }
634
635 // To know whether a call adjusts the stack, we need information
636 // that is bound to the following ADJCALLSTACKUP pseudo.
637 // Look for the next ADJCALLSTACKUP that follows the call.
638 if (MI.isCall()) {
639 const MachineBasicBlock *MBB = MI.getParent();
640 auto I = ++MachineBasicBlock::const_iterator(MI);
641 for (auto E = MBB->end(); I != E; ++I) {
642 if (I->getOpcode() == getCallFrameDestroyOpcode() ||
643 I->isCall())
644 break;
645 }
646
647 // If we could not find a frame destroy opcode, then it has already
648 // been simplified, so we don't care.
649 if (I->getOpcode() != getCallFrameDestroyOpcode())
650 return 0;
651
652 return -(I->getOperand(1).getImm());
653 }
654
655 // Currently handle only PUSHes we can reasonably expect to see
656 // in call sequences
657 switch (MI.getOpcode()) {
658 default:
659 return 0;
660 case X86::PUSH32i8:
661 case X86::PUSH32r:
662 case X86::PUSH32rmm:
663 case X86::PUSH32rmr:
664 case X86::PUSHi32:
665 return 4;
666 case X86::PUSH64i8:
667 case X86::PUSH64r:
668 case X86::PUSH64rmm:
669 case X86::PUSH64rmr:
670 case X86::PUSH64i32:
671 return 8;
672 }
673 }
674
675 /// Return true and the FrameIndex if the specified
676 /// operand and follow operands form a reference to the stack frame.
isFrameOperand(const MachineInstr & MI,unsigned int Op,int & FrameIndex) const677 bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
678 int &FrameIndex) const {
679 if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
680 MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
681 MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
682 MI.getOperand(Op + X86::AddrDisp).isImm() &&
683 MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
684 MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
685 MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
686 FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
687 return true;
688 }
689 return false;
690 }
691
isFrameLoadOpcode(int Opcode,unsigned & MemBytes)692 static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {
693 switch (Opcode) {
694 default:
695 return false;
696 case X86::MOV8rm:
697 case X86::KMOVBkm:
698 MemBytes = 1;
699 return true;
700 case X86::MOV16rm:
701 case X86::KMOVWkm:
702 case X86::VMOVSHZrm:
703 case X86::VMOVSHZrm_alt:
704 MemBytes = 2;
705 return true;
706 case X86::MOV32rm:
707 case X86::MOVSSrm:
708 case X86::MOVSSrm_alt:
709 case X86::VMOVSSrm:
710 case X86::VMOVSSrm_alt:
711 case X86::VMOVSSZrm:
712 case X86::VMOVSSZrm_alt:
713 case X86::KMOVDkm:
714 MemBytes = 4;
715 return true;
716 case X86::MOV64rm:
717 case X86::LD_Fp64m:
718 case X86::MOVSDrm:
719 case X86::MOVSDrm_alt:
720 case X86::VMOVSDrm:
721 case X86::VMOVSDrm_alt:
722 case X86::VMOVSDZrm:
723 case X86::VMOVSDZrm_alt:
724 case X86::MMX_MOVD64rm:
725 case X86::MMX_MOVQ64rm:
726 case X86::KMOVQkm:
727 MemBytes = 8;
728 return true;
729 case X86::MOVAPSrm:
730 case X86::MOVUPSrm:
731 case X86::MOVAPDrm:
732 case X86::MOVUPDrm:
733 case X86::MOVDQArm:
734 case X86::MOVDQUrm:
735 case X86::VMOVAPSrm:
736 case X86::VMOVUPSrm:
737 case X86::VMOVAPDrm:
738 case X86::VMOVUPDrm:
739 case X86::VMOVDQArm:
740 case X86::VMOVDQUrm:
741 case X86::VMOVAPSZ128rm:
742 case X86::VMOVUPSZ128rm:
743 case X86::VMOVAPSZ128rm_NOVLX:
744 case X86::VMOVUPSZ128rm_NOVLX:
745 case X86::VMOVAPDZ128rm:
746 case X86::VMOVUPDZ128rm:
747 case X86::VMOVDQU8Z128rm:
748 case X86::VMOVDQU16Z128rm:
749 case X86::VMOVDQA32Z128rm:
750 case X86::VMOVDQU32Z128rm:
751 case X86::VMOVDQA64Z128rm:
752 case X86::VMOVDQU64Z128rm:
753 MemBytes = 16;
754 return true;
755 case X86::VMOVAPSYrm:
756 case X86::VMOVUPSYrm:
757 case X86::VMOVAPDYrm:
758 case X86::VMOVUPDYrm:
759 case X86::VMOVDQAYrm:
760 case X86::VMOVDQUYrm:
761 case X86::VMOVAPSZ256rm:
762 case X86::VMOVUPSZ256rm:
763 case X86::VMOVAPSZ256rm_NOVLX:
764 case X86::VMOVUPSZ256rm_NOVLX:
765 case X86::VMOVAPDZ256rm:
766 case X86::VMOVUPDZ256rm:
767 case X86::VMOVDQU8Z256rm:
768 case X86::VMOVDQU16Z256rm:
769 case X86::VMOVDQA32Z256rm:
770 case X86::VMOVDQU32Z256rm:
771 case X86::VMOVDQA64Z256rm:
772 case X86::VMOVDQU64Z256rm:
773 MemBytes = 32;
774 return true;
775 case X86::VMOVAPSZrm:
776 case X86::VMOVUPSZrm:
777 case X86::VMOVAPDZrm:
778 case X86::VMOVUPDZrm:
779 case X86::VMOVDQU8Zrm:
780 case X86::VMOVDQU16Zrm:
781 case X86::VMOVDQA32Zrm:
782 case X86::VMOVDQU32Zrm:
783 case X86::VMOVDQA64Zrm:
784 case X86::VMOVDQU64Zrm:
785 MemBytes = 64;
786 return true;
787 }
788 }
789
isFrameStoreOpcode(int Opcode,unsigned & MemBytes)790 static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {
791 switch (Opcode) {
792 default:
793 return false;
794 case X86::MOV8mr:
795 case X86::KMOVBmk:
796 MemBytes = 1;
797 return true;
798 case X86::MOV16mr:
799 case X86::KMOVWmk:
800 case X86::VMOVSHZmr:
801 MemBytes = 2;
802 return true;
803 case X86::MOV32mr:
804 case X86::MOVSSmr:
805 case X86::VMOVSSmr:
806 case X86::VMOVSSZmr:
807 case X86::KMOVDmk:
808 MemBytes = 4;
809 return true;
810 case X86::MOV64mr:
811 case X86::ST_FpP64m:
812 case X86::MOVSDmr:
813 case X86::VMOVSDmr:
814 case X86::VMOVSDZmr:
815 case X86::MMX_MOVD64mr:
816 case X86::MMX_MOVQ64mr:
817 case X86::MMX_MOVNTQmr:
818 case X86::KMOVQmk:
819 MemBytes = 8;
820 return true;
821 case X86::MOVAPSmr:
822 case X86::MOVUPSmr:
823 case X86::MOVAPDmr:
824 case X86::MOVUPDmr:
825 case X86::MOVDQAmr:
826 case X86::MOVDQUmr:
827 case X86::VMOVAPSmr:
828 case X86::VMOVUPSmr:
829 case X86::VMOVAPDmr:
830 case X86::VMOVUPDmr:
831 case X86::VMOVDQAmr:
832 case X86::VMOVDQUmr:
833 case X86::VMOVUPSZ128mr:
834 case X86::VMOVAPSZ128mr:
835 case X86::VMOVUPSZ128mr_NOVLX:
836 case X86::VMOVAPSZ128mr_NOVLX:
837 case X86::VMOVUPDZ128mr:
838 case X86::VMOVAPDZ128mr:
839 case X86::VMOVDQA32Z128mr:
840 case X86::VMOVDQU32Z128mr:
841 case X86::VMOVDQA64Z128mr:
842 case X86::VMOVDQU64Z128mr:
843 case X86::VMOVDQU8Z128mr:
844 case X86::VMOVDQU16Z128mr:
845 MemBytes = 16;
846 return true;
847 case X86::VMOVUPSYmr:
848 case X86::VMOVAPSYmr:
849 case X86::VMOVUPDYmr:
850 case X86::VMOVAPDYmr:
851 case X86::VMOVDQUYmr:
852 case X86::VMOVDQAYmr:
853 case X86::VMOVUPSZ256mr:
854 case X86::VMOVAPSZ256mr:
855 case X86::VMOVUPSZ256mr_NOVLX:
856 case X86::VMOVAPSZ256mr_NOVLX:
857 case X86::VMOVUPDZ256mr:
858 case X86::VMOVAPDZ256mr:
859 case X86::VMOVDQU8Z256mr:
860 case X86::VMOVDQU16Z256mr:
861 case X86::VMOVDQA32Z256mr:
862 case X86::VMOVDQU32Z256mr:
863 case X86::VMOVDQA64Z256mr:
864 case X86::VMOVDQU64Z256mr:
865 MemBytes = 32;
866 return true;
867 case X86::VMOVUPSZmr:
868 case X86::VMOVAPSZmr:
869 case X86::VMOVUPDZmr:
870 case X86::VMOVAPDZmr:
871 case X86::VMOVDQU8Zmr:
872 case X86::VMOVDQU16Zmr:
873 case X86::VMOVDQA32Zmr:
874 case X86::VMOVDQU32Zmr:
875 case X86::VMOVDQA64Zmr:
876 case X86::VMOVDQU64Zmr:
877 MemBytes = 64;
878 return true;
879 }
880 return false;
881 }
882
isLoadFromStackSlot(const MachineInstr & MI,int & FrameIndex) const883 unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
884 int &FrameIndex) const {
885 unsigned Dummy;
886 return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
887 }
888
isLoadFromStackSlot(const MachineInstr & MI,int & FrameIndex,unsigned & MemBytes) const889 unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
890 int &FrameIndex,
891 unsigned &MemBytes) const {
892 if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
893 if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
894 return MI.getOperand(0).getReg();
895 return 0;
896 }
897
isLoadFromStackSlotPostFE(const MachineInstr & MI,int & FrameIndex) const898 unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
899 int &FrameIndex) const {
900 unsigned Dummy;
901 if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
902 unsigned Reg;
903 if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
904 return Reg;
905 // Check for post-frame index elimination operations
906 SmallVector<const MachineMemOperand *, 1> Accesses;
907 if (hasLoadFromStackSlot(MI, Accesses)) {
908 FrameIndex =
909 cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
910 ->getFrameIndex();
911 return MI.getOperand(0).getReg();
912 }
913 }
914 return 0;
915 }
916
isStoreToStackSlot(const MachineInstr & MI,int & FrameIndex) const917 unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
918 int &FrameIndex) const {
919 unsigned Dummy;
920 return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
921 }
922
isStoreToStackSlot(const MachineInstr & MI,int & FrameIndex,unsigned & MemBytes) const923 unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
924 int &FrameIndex,
925 unsigned &MemBytes) const {
926 if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
927 if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
928 isFrameOperand(MI, 0, FrameIndex))
929 return MI.getOperand(X86::AddrNumOperands).getReg();
930 return 0;
931 }
932
isStoreToStackSlotPostFE(const MachineInstr & MI,int & FrameIndex) const933 unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
934 int &FrameIndex) const {
935 unsigned Dummy;
936 if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
937 unsigned Reg;
938 if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
939 return Reg;
940 // Check for post-frame index elimination operations
941 SmallVector<const MachineMemOperand *, 1> Accesses;
942 if (hasStoreToStackSlot(MI, Accesses)) {
943 FrameIndex =
944 cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
945 ->getFrameIndex();
946 return MI.getOperand(X86::AddrNumOperands).getReg();
947 }
948 }
949 return 0;
950 }
951
952 /// Return true if register is PIC base; i.e.g defined by X86::MOVPC32r.
regIsPICBase(Register BaseReg,const MachineRegisterInfo & MRI)953 static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) {
954 // Don't waste compile time scanning use-def chains of physregs.
955 if (!BaseReg.isVirtual())
956 return false;
957 bool isPICBase = false;
958 for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
959 E = MRI.def_instr_end(); I != E; ++I) {
960 MachineInstr *DefMI = &*I;
961 if (DefMI->getOpcode() != X86::MOVPC32r)
962 return false;
963 assert(!isPICBase && "More than one PIC base?");
964 isPICBase = true;
965 }
966 return isPICBase;
967 }
968
isReallyTriviallyReMaterializable(const MachineInstr & MI,AAResults * AA) const969 bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
970 AAResults *AA) const {
971 switch (MI.getOpcode()) {
972 default:
973 // This function should only be called for opcodes with the ReMaterializable
974 // flag set.
975 llvm_unreachable("Unknown rematerializable operation!");
976 break;
977
978 case X86::LOAD_STACK_GUARD:
979 case X86::AVX1_SETALLONES:
980 case X86::AVX2_SETALLONES:
981 case X86::AVX512_128_SET0:
982 case X86::AVX512_256_SET0:
983 case X86::AVX512_512_SET0:
984 case X86::AVX512_512_SETALLONES:
985 case X86::AVX512_FsFLD0SD:
986 case X86::AVX512_FsFLD0SH:
987 case X86::AVX512_FsFLD0SS:
988 case X86::AVX512_FsFLD0F128:
989 case X86::AVX_SET0:
990 case X86::FsFLD0SD:
991 case X86::FsFLD0SS:
992 case X86::FsFLD0F128:
993 case X86::KSET0D:
994 case X86::KSET0Q:
995 case X86::KSET0W:
996 case X86::KSET1D:
997 case X86::KSET1Q:
998 case X86::KSET1W:
999 case X86::MMX_SET0:
1000 case X86::MOV32ImmSExti8:
1001 case X86::MOV32r0:
1002 case X86::MOV32r1:
1003 case X86::MOV32r_1:
1004 case X86::MOV32ri64:
1005 case X86::MOV64ImmSExti8:
1006 case X86::V_SET0:
1007 case X86::V_SETALLONES:
1008 case X86::MOV16ri:
1009 case X86::MOV32ri:
1010 case X86::MOV64ri:
1011 case X86::MOV64ri32:
1012 case X86::MOV8ri:
1013 case X86::PTILEZEROV:
1014 return true;
1015
1016 case X86::MOV8rm:
1017 case X86::MOV8rm_NOREX:
1018 case X86::MOV16rm:
1019 case X86::MOV32rm:
1020 case X86::MOV64rm:
1021 case X86::MOVSSrm:
1022 case X86::MOVSSrm_alt:
1023 case X86::MOVSDrm:
1024 case X86::MOVSDrm_alt:
1025 case X86::MOVAPSrm:
1026 case X86::MOVUPSrm:
1027 case X86::MOVAPDrm:
1028 case X86::MOVUPDrm:
1029 case X86::MOVDQArm:
1030 case X86::MOVDQUrm:
1031 case X86::VMOVSSrm:
1032 case X86::VMOVSSrm_alt:
1033 case X86::VMOVSDrm:
1034 case X86::VMOVSDrm_alt:
1035 case X86::VMOVAPSrm:
1036 case X86::VMOVUPSrm:
1037 case X86::VMOVAPDrm:
1038 case X86::VMOVUPDrm:
1039 case X86::VMOVDQArm:
1040 case X86::VMOVDQUrm:
1041 case X86::VMOVAPSYrm:
1042 case X86::VMOVUPSYrm:
1043 case X86::VMOVAPDYrm:
1044 case X86::VMOVUPDYrm:
1045 case X86::VMOVDQAYrm:
1046 case X86::VMOVDQUYrm:
1047 case X86::MMX_MOVD64rm:
1048 case X86::MMX_MOVQ64rm:
1049 // AVX-512
1050 case X86::VMOVSSZrm:
1051 case X86::VMOVSSZrm_alt:
1052 case X86::VMOVSDZrm:
1053 case X86::VMOVSDZrm_alt:
1054 case X86::VMOVSHZrm:
1055 case X86::VMOVSHZrm_alt:
1056 case X86::VMOVAPDZ128rm:
1057 case X86::VMOVAPDZ256rm:
1058 case X86::VMOVAPDZrm:
1059 case X86::VMOVAPSZ128rm:
1060 case X86::VMOVAPSZ256rm:
1061 case X86::VMOVAPSZ128rm_NOVLX:
1062 case X86::VMOVAPSZ256rm_NOVLX:
1063 case X86::VMOVAPSZrm:
1064 case X86::VMOVDQA32Z128rm:
1065 case X86::VMOVDQA32Z256rm:
1066 case X86::VMOVDQA32Zrm:
1067 case X86::VMOVDQA64Z128rm:
1068 case X86::VMOVDQA64Z256rm:
1069 case X86::VMOVDQA64Zrm:
1070 case X86::VMOVDQU16Z128rm:
1071 case X86::VMOVDQU16Z256rm:
1072 case X86::VMOVDQU16Zrm:
1073 case X86::VMOVDQU32Z128rm:
1074 case X86::VMOVDQU32Z256rm:
1075 case X86::VMOVDQU32Zrm:
1076 case X86::VMOVDQU64Z128rm:
1077 case X86::VMOVDQU64Z256rm:
1078 case X86::VMOVDQU64Zrm:
1079 case X86::VMOVDQU8Z128rm:
1080 case X86::VMOVDQU8Z256rm:
1081 case X86::VMOVDQU8Zrm:
1082 case X86::VMOVUPDZ128rm:
1083 case X86::VMOVUPDZ256rm:
1084 case X86::VMOVUPDZrm:
1085 case X86::VMOVUPSZ128rm:
1086 case X86::VMOVUPSZ256rm:
1087 case X86::VMOVUPSZ128rm_NOVLX:
1088 case X86::VMOVUPSZ256rm_NOVLX:
1089 case X86::VMOVUPSZrm: {
1090 // Loads from constant pools are trivially rematerializable.
1091 if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
1092 MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
1093 MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
1094 MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
1095 MI.isDereferenceableInvariantLoad(AA)) {
1096 Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
1097 if (BaseReg == 0 || BaseReg == X86::RIP)
1098 return true;
1099 // Allow re-materialization of PIC load.
1100 if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
1101 return false;
1102 const MachineFunction &MF = *MI.getParent()->getParent();
1103 const MachineRegisterInfo &MRI = MF.getRegInfo();
1104 return regIsPICBase(BaseReg, MRI);
1105 }
1106 return false;
1107 }
1108
1109 case X86::LEA32r:
1110 case X86::LEA64r: {
1111 if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
1112 MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
1113 MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
1114 !MI.getOperand(1 + X86::AddrDisp).isReg()) {
1115 // lea fi#, lea GV, etc. are all rematerializable.
1116 if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
1117 return true;
1118 Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
1119 if (BaseReg == 0)
1120 return true;
1121 // Allow re-materialization of lea PICBase + x.
1122 const MachineFunction &MF = *MI.getParent()->getParent();
1123 const MachineRegisterInfo &MRI = MF.getRegInfo();
1124 return regIsPICBase(BaseReg, MRI);
1125 }
1126 return false;
1127 }
1128 }
1129 }
1130
reMaterialize(MachineBasicBlock & MBB,MachineBasicBlock::iterator I,Register DestReg,unsigned SubIdx,const MachineInstr & Orig,const TargetRegisterInfo & TRI) const1131 void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
1132 MachineBasicBlock::iterator I,
1133 Register DestReg, unsigned SubIdx,
1134 const MachineInstr &Orig,
1135 const TargetRegisterInfo &TRI) const {
1136 bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
1137 if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) !=
1138 MachineBasicBlock::LQR_Dead) {
1139 // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
1140 // effects.
1141 int Value;
1142 switch (Orig.getOpcode()) {
1143 case X86::MOV32r0: Value = 0; break;
1144 case X86::MOV32r1: Value = 1; break;
1145 case X86::MOV32r_1: Value = -1; break;
1146 default:
1147 llvm_unreachable("Unexpected instruction!");
1148 }
1149
1150 const DebugLoc &DL = Orig.getDebugLoc();
1151 BuildMI(MBB, I, DL, get(X86::MOV32ri))
1152 .add(Orig.getOperand(0))
1153 .addImm(Value);
1154 } else {
1155 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
1156 MBB.insert(I, MI);
1157 }
1158
1159 MachineInstr &NewMI = *std::prev(I);
1160 NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
1161 }
1162
1163 /// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
hasLiveCondCodeDef(MachineInstr & MI) const1164 bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
1165 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1166 MachineOperand &MO = MI.getOperand(i);
1167 if (MO.isReg() && MO.isDef() &&
1168 MO.getReg() == X86::EFLAGS && !MO.isDead()) {
1169 return true;
1170 }
1171 }
1172 return false;
1173 }
1174
1175 /// Check whether the shift count for a machine operand is non-zero.
getTruncatedShiftCount(const MachineInstr & MI,unsigned ShiftAmtOperandIdx)1176 inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
1177 unsigned ShiftAmtOperandIdx) {
1178 // The shift count is six bits with the REX.W prefix and five bits without.
1179 unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
1180 unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
1181 return Imm & ShiftCountMask;
1182 }
1183
1184 /// Check whether the given shift count is appropriate
1185 /// can be represented by a LEA instruction.
isTruncatedShiftCountForLEA(unsigned ShAmt)1186 inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
1187 // Left shift instructions can be transformed into load-effective-address
1188 // instructions if we can encode them appropriately.
1189 // A LEA instruction utilizes a SIB byte to encode its scale factor.
1190 // The SIB.scale field is two bits wide which means that we can encode any
1191 // shift amount less than 4.
1192 return ShAmt < 4 && ShAmt > 0;
1193 }
1194
classifyLEAReg(MachineInstr & MI,const MachineOperand & Src,unsigned Opc,bool AllowSP,Register & NewSrc,bool & isKill,MachineOperand & ImplicitOp,LiveVariables * LV) const1195 bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
1196 unsigned Opc, bool AllowSP, Register &NewSrc,
1197 bool &isKill, MachineOperand &ImplicitOp,
1198 LiveVariables *LV) const {
1199 MachineFunction &MF = *MI.getParent()->getParent();
1200 const TargetRegisterClass *RC;
1201 if (AllowSP) {
1202 RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
1203 } else {
1204 RC = Opc != X86::LEA32r ?
1205 &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
1206 }
1207 Register SrcReg = Src.getReg();
1208 isKill = MI.killsRegister(SrcReg);
1209
1210 // For both LEA64 and LEA32 the register already has essentially the right
1211 // type (32-bit or 64-bit) we may just need to forbid SP.
1212 if (Opc != X86::LEA64_32r) {
1213 NewSrc = SrcReg;
1214 assert(!Src.isUndef() && "Undef op doesn't need optimization");
1215
1216 if (NewSrc.isVirtual() && !MF.getRegInfo().constrainRegClass(NewSrc, RC))
1217 return false;
1218
1219 return true;
1220 }
1221
1222 // This is for an LEA64_32r and incoming registers are 32-bit. One way or
1223 // another we need to add 64-bit registers to the final MI.
1224 if (SrcReg.isPhysical()) {
1225 ImplicitOp = Src;
1226 ImplicitOp.setImplicit();
1227
1228 NewSrc = getX86SubSuperRegister(SrcReg, 64);
1229 assert(!Src.isUndef() && "Undef op doesn't need optimization");
1230 } else {
1231 // Virtual register of the wrong class, we have to create a temporary 64-bit
1232 // vreg to feed into the LEA.
1233 NewSrc = MF.getRegInfo().createVirtualRegister(RC);
1234 MachineInstr *Copy =
1235 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1236 .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
1237 .addReg(SrcReg, getKillRegState(isKill));
1238
1239 // Which is obviously going to be dead after we're done with it.
1240 isKill = true;
1241
1242 if (LV)
1243 LV->replaceKillInstruction(SrcReg, MI, *Copy);
1244 }
1245
1246 // We've set all the parameters without issue.
1247 return true;
1248 }
1249
convertToThreeAddressWithLEA(unsigned MIOpc,MachineInstr & MI,LiveVariables * LV,bool Is8BitOp) const1250 MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
1251 MachineInstr &MI,
1252 LiveVariables *LV,
1253 bool Is8BitOp) const {
1254 // We handle 8-bit adds and various 16-bit opcodes in the switch below.
1255 MachineBasicBlock &MBB = *MI.getParent();
1256 MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
1257 assert((Is8BitOp || RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
1258 *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
1259 "Unexpected type for LEA transform");
1260
1261 // TODO: For a 32-bit target, we need to adjust the LEA variables with
1262 // something like this:
1263 // Opcode = X86::LEA32r;
1264 // InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1265 // OutRegLEA =
1266 // Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass)
1267 // : RegInfo.createVirtualRegister(&X86::GR32RegClass);
1268 if (!Subtarget.is64Bit())
1269 return nullptr;
1270
1271 unsigned Opcode = X86::LEA64_32r;
1272 Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
1273 Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);
1274
1275 // Build and insert into an implicit UNDEF value. This is OK because
1276 // we will be shifting and then extracting the lower 8/16-bits.
1277 // This has the potential to cause partial register stall. e.g.
1278 // movw (%rbp,%rcx,2), %dx
1279 // leal -65(%rdx), %esi
1280 // But testing has shown this *does* help performance in 64-bit mode (at
1281 // least on modern x86 machines).
1282 MachineBasicBlock::iterator MBBI = MI.getIterator();
1283 Register Dest = MI.getOperand(0).getReg();
1284 Register Src = MI.getOperand(1).getReg();
1285 bool IsDead = MI.getOperand(0).isDead();
1286 bool IsKill = MI.getOperand(1).isKill();
1287 unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
1288 assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
1289 BuildMI(MBB, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
1290 MachineInstr *InsMI =
1291 BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1292 .addReg(InRegLEA, RegState::Define, SubReg)
1293 .addReg(Src, getKillRegState(IsKill));
1294
1295 MachineInstrBuilder MIB =
1296 BuildMI(MBB, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
1297 switch (MIOpc) {
1298 default: llvm_unreachable("Unreachable!");
1299 case X86::SHL8ri:
1300 case X86::SHL16ri: {
1301 unsigned ShAmt = MI.getOperand(2).getImm();
1302 MIB.addReg(0).addImm(1ULL << ShAmt)
1303 .addReg(InRegLEA, RegState::Kill).addImm(0).addReg(0);
1304 break;
1305 }
1306 case X86::INC8r:
1307 case X86::INC16r:
1308 addRegOffset(MIB, InRegLEA, true, 1);
1309 break;
1310 case X86::DEC8r:
1311 case X86::DEC16r:
1312 addRegOffset(MIB, InRegLEA, true, -1);
1313 break;
1314 case X86::ADD8ri:
1315 case X86::ADD8ri_DB:
1316 case X86::ADD16ri:
1317 case X86::ADD16ri8:
1318 case X86::ADD16ri_DB:
1319 case X86::ADD16ri8_DB:
1320 addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
1321 break;
1322 case X86::ADD8rr:
1323 case X86::ADD8rr_DB:
1324 case X86::ADD16rr:
1325 case X86::ADD16rr_DB: {
1326 Register Src2 = MI.getOperand(2).getReg();
1327 bool IsKill2 = MI.getOperand(2).isKill();
1328 assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
1329 unsigned InRegLEA2 = 0;
1330 MachineInstr *InsMI2 = nullptr;
1331 if (Src == Src2) {
1332 // ADD8rr/ADD16rr killed %reg1028, %reg1028
1333 // just a single insert_subreg.
1334 addRegReg(MIB, InRegLEA, true, InRegLEA, false);
1335 } else {
1336 if (Subtarget.is64Bit())
1337 InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
1338 else
1339 InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1340 // Build and insert into an implicit UNDEF value. This is OK because
1341 // we will be shifting and then extracting the lower 8/16-bits.
1342 BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA2);
1343 InsMI2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
1344 .addReg(InRegLEA2, RegState::Define, SubReg)
1345 .addReg(Src2, getKillRegState(IsKill2));
1346 addRegReg(MIB, InRegLEA, true, InRegLEA2, true);
1347 }
1348 if (LV && IsKill2 && InsMI2)
1349 LV->replaceKillInstruction(Src2, MI, *InsMI2);
1350 break;
1351 }
1352 }
1353
1354 MachineInstr *NewMI = MIB;
1355 MachineInstr *ExtMI =
1356 BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1357 .addReg(Dest, RegState::Define | getDeadRegState(IsDead))
1358 .addReg(OutRegLEA, RegState::Kill, SubReg);
1359
1360 if (LV) {
1361 // Update live variables.
1362 LV->getVarInfo(InRegLEA).Kills.push_back(NewMI);
1363 LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI);
1364 if (IsKill)
1365 LV->replaceKillInstruction(Src, MI, *InsMI);
1366 if (IsDead)
1367 LV->replaceKillInstruction(Dest, MI, *ExtMI);
1368 }
1369
1370 return ExtMI;
1371 }
1372
1373 /// This method must be implemented by targets that
1374 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
1375 /// may be able to convert a two-address instruction into a true
1376 /// three-address instruction on demand. This allows the X86 target (for
1377 /// example) to convert ADD and SHL instructions into LEA instructions if they
1378 /// would require register copies due to two-addressness.
1379 ///
1380 /// This method returns a null pointer if the transformation cannot be
1381 /// performed, otherwise it returns the new instruction.
1382 ///
convertToThreeAddress(MachineInstr & MI,LiveVariables * LV) const1383 MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
1384 LiveVariables *LV) const {
1385 // The following opcodes also sets the condition code register(s). Only
1386 // convert them to equivalent lea if the condition code register def's
1387 // are dead!
1388 if (hasLiveCondCodeDef(MI))
1389 return nullptr;
1390
1391 MachineFunction &MF = *MI.getParent()->getParent();
1392 // All instructions input are two-addr instructions. Get the known operands.
1393 const MachineOperand &Dest = MI.getOperand(0);
1394 const MachineOperand &Src = MI.getOperand(1);
1395
1396 // Ideally, operations with undef should be folded before we get here, but we
1397 // can't guarantee it. Bail out because optimizing undefs is a waste of time.
1398 // Without this, we have to forward undef state to new register operands to
1399 // avoid machine verifier errors.
1400 if (Src.isUndef())
1401 return nullptr;
1402 if (MI.getNumOperands() > 2)
1403 if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
1404 return nullptr;
1405
1406 MachineInstr *NewMI = nullptr;
1407 bool Is64Bit = Subtarget.is64Bit();
1408
1409 bool Is8BitOp = false;
1410 unsigned MIOpc = MI.getOpcode();
1411 switch (MIOpc) {
1412 default: llvm_unreachable("Unreachable!");
1413 case X86::SHL64ri: {
1414 assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1415 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1416 if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
1417
1418 // LEA can't handle RSP.
1419 if (Src.getReg().isVirtual() && !MF.getRegInfo().constrainRegClass(
1420 Src.getReg(), &X86::GR64_NOSPRegClass))
1421 return nullptr;
1422
1423 NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
1424 .add(Dest)
1425 .addReg(0)
1426 .addImm(1ULL << ShAmt)
1427 .add(Src)
1428 .addImm(0)
1429 .addReg(0);
1430 break;
1431 }
1432 case X86::SHL32ri: {
1433 assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1434 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1435 if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
1436
1437 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1438
1439 // LEA can't handle ESP.
1440 bool isKill;
1441 Register SrcReg;
1442 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1443 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
1444 SrcReg, isKill, ImplicitOp, LV))
1445 return nullptr;
1446
1447 MachineInstrBuilder MIB =
1448 BuildMI(MF, MI.getDebugLoc(), get(Opc))
1449 .add(Dest)
1450 .addReg(0)
1451 .addImm(1ULL << ShAmt)
1452 .addReg(SrcReg, getKillRegState(isKill))
1453 .addImm(0)
1454 .addReg(0);
1455 if (ImplicitOp.getReg() != 0)
1456 MIB.add(ImplicitOp);
1457 NewMI = MIB;
1458
1459 break;
1460 }
1461 case X86::SHL8ri:
1462 Is8BitOp = true;
1463 LLVM_FALLTHROUGH;
1464 case X86::SHL16ri: {
1465 assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1466 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1467 if (!isTruncatedShiftCountForLEA(ShAmt))
1468 return nullptr;
1469 return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
1470 }
1471 case X86::INC64r:
1472 case X86::INC32r: {
1473 assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
1474 unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r :
1475 (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1476 bool isKill;
1477 Register SrcReg;
1478 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1479 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
1480 ImplicitOp, LV))
1481 return nullptr;
1482
1483 MachineInstrBuilder MIB =
1484 BuildMI(MF, MI.getDebugLoc(), get(Opc))
1485 .add(Dest)
1486 .addReg(SrcReg, getKillRegState(isKill));
1487 if (ImplicitOp.getReg() != 0)
1488 MIB.add(ImplicitOp);
1489
1490 NewMI = addOffset(MIB, 1);
1491 break;
1492 }
1493 case X86::DEC64r:
1494 case X86::DEC32r: {
1495 assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
1496 unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
1497 : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1498
1499 bool isKill;
1500 Register SrcReg;
1501 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1502 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
1503 ImplicitOp, LV))
1504 return nullptr;
1505
1506 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1507 .add(Dest)
1508 .addReg(SrcReg, getKillRegState(isKill));
1509 if (ImplicitOp.getReg() != 0)
1510 MIB.add(ImplicitOp);
1511
1512 NewMI = addOffset(MIB, -1);
1513
1514 break;
1515 }
1516 case X86::DEC8r:
1517 case X86::INC8r:
1518 Is8BitOp = true;
1519 LLVM_FALLTHROUGH;
1520 case X86::DEC16r:
1521 case X86::INC16r:
1522 return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
1523 case X86::ADD64rr:
1524 case X86::ADD64rr_DB:
1525 case X86::ADD32rr:
1526 case X86::ADD32rr_DB: {
1527 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1528 unsigned Opc;
1529 if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
1530 Opc = X86::LEA64r;
1531 else
1532 Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1533
1534 const MachineOperand &Src2 = MI.getOperand(2);
1535 bool isKill2;
1536 Register SrcReg2;
1537 MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
1538 if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
1539 SrcReg2, isKill2, ImplicitOp2, LV))
1540 return nullptr;
1541
1542 bool isKill;
1543 Register SrcReg;
1544 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1545 if (Src.getReg() == Src2.getReg()) {
1546 // Don't call classify LEAReg a second time on the same register, in case
1547 // the first call inserted a COPY from Src2 and marked it as killed.
1548 isKill = isKill2;
1549 SrcReg = SrcReg2;
1550 } else {
1551 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true,
1552 SrcReg, isKill, ImplicitOp, LV))
1553 return nullptr;
1554 }
1555
1556 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
1557 if (ImplicitOp.getReg() != 0)
1558 MIB.add(ImplicitOp);
1559 if (ImplicitOp2.getReg() != 0)
1560 MIB.add(ImplicitOp2);
1561
1562 NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
1563 if (LV && Src2.isKill())
1564 LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
1565 break;
1566 }
1567 case X86::ADD8rr:
1568 case X86::ADD8rr_DB:
1569 Is8BitOp = true;
1570 LLVM_FALLTHROUGH;
1571 case X86::ADD16rr:
1572 case X86::ADD16rr_DB:
1573 return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
1574 case X86::ADD64ri32:
1575 case X86::ADD64ri8:
1576 case X86::ADD64ri32_DB:
1577 case X86::ADD64ri8_DB:
1578 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1579 NewMI = addOffset(
1580 BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
1581 MI.getOperand(2));
1582 break;
1583 case X86::ADD32ri:
1584 case X86::ADD32ri8:
1585 case X86::ADD32ri_DB:
1586 case X86::ADD32ri8_DB: {
1587 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1588 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1589
1590 bool isKill;
1591 Register SrcReg;
1592 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1593 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
1594 SrcReg, isKill, ImplicitOp, LV))
1595 return nullptr;
1596
1597 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1598 .add(Dest)
1599 .addReg(SrcReg, getKillRegState(isKill));
1600 if (ImplicitOp.getReg() != 0)
1601 MIB.add(ImplicitOp);
1602
1603 NewMI = addOffset(MIB, MI.getOperand(2));
1604 break;
1605 }
1606 case X86::ADD8ri:
1607 case X86::ADD8ri_DB:
1608 Is8BitOp = true;
1609 LLVM_FALLTHROUGH;
1610 case X86::ADD16ri:
1611 case X86::ADD16ri8:
1612 case X86::ADD16ri_DB:
1613 case X86::ADD16ri8_DB:
1614 return convertToThreeAddressWithLEA(MIOpc, MI, LV, Is8BitOp);
1615 case X86::SUB8ri:
1616 case X86::SUB16ri8:
1617 case X86::SUB16ri:
1618 /// FIXME: Support these similar to ADD8ri/ADD16ri*.
1619 return nullptr;
1620 case X86::SUB32ri8:
1621 case X86::SUB32ri: {
1622 if (!MI.getOperand(2).isImm())
1623 return nullptr;
1624 int64_t Imm = MI.getOperand(2).getImm();
1625 if (!isInt<32>(-Imm))
1626 return nullptr;
1627
1628 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1629 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1630
1631 bool isKill;
1632 Register SrcReg;
1633 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1634 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
1635 SrcReg, isKill, ImplicitOp, LV))
1636 return nullptr;
1637
1638 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1639 .add(Dest)
1640 .addReg(SrcReg, getKillRegState(isKill));
1641 if (ImplicitOp.getReg() != 0)
1642 MIB.add(ImplicitOp);
1643
1644 NewMI = addOffset(MIB, -Imm);
1645 break;
1646 }
1647
1648 case X86::SUB64ri8:
1649 case X86::SUB64ri32: {
1650 if (!MI.getOperand(2).isImm())
1651 return nullptr;
1652 int64_t Imm = MI.getOperand(2).getImm();
1653 if (!isInt<32>(-Imm))
1654 return nullptr;
1655
1656 assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
1657
1658 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(),
1659 get(X86::LEA64r)).add(Dest).add(Src);
1660 NewMI = addOffset(MIB, -Imm);
1661 break;
1662 }
1663
1664 case X86::VMOVDQU8Z128rmk:
1665 case X86::VMOVDQU8Z256rmk:
1666 case X86::VMOVDQU8Zrmk:
1667 case X86::VMOVDQU16Z128rmk:
1668 case X86::VMOVDQU16Z256rmk:
1669 case X86::VMOVDQU16Zrmk:
1670 case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk:
1671 case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk:
1672 case X86::VMOVDQU32Zrmk: case X86::VMOVDQA32Zrmk:
1673 case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk:
1674 case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk:
1675 case X86::VMOVDQU64Zrmk: case X86::VMOVDQA64Zrmk:
1676 case X86::VMOVUPDZ128rmk: case X86::VMOVAPDZ128rmk:
1677 case X86::VMOVUPDZ256rmk: case X86::VMOVAPDZ256rmk:
1678 case X86::VMOVUPDZrmk: case X86::VMOVAPDZrmk:
1679 case X86::VMOVUPSZ128rmk: case X86::VMOVAPSZ128rmk:
1680 case X86::VMOVUPSZ256rmk: case X86::VMOVAPSZ256rmk:
1681 case X86::VMOVUPSZrmk: case X86::VMOVAPSZrmk:
1682 case X86::VBROADCASTSDZ256rmk:
1683 case X86::VBROADCASTSDZrmk:
1684 case X86::VBROADCASTSSZ128rmk:
1685 case X86::VBROADCASTSSZ256rmk:
1686 case X86::VBROADCASTSSZrmk:
1687 case X86::VPBROADCASTDZ128rmk:
1688 case X86::VPBROADCASTDZ256rmk:
1689 case X86::VPBROADCASTDZrmk:
1690 case X86::VPBROADCASTQZ128rmk:
1691 case X86::VPBROADCASTQZ256rmk:
1692 case X86::VPBROADCASTQZrmk: {
1693 unsigned Opc;
1694 switch (MIOpc) {
1695 default: llvm_unreachable("Unreachable!");
1696 case X86::VMOVDQU8Z128rmk: Opc = X86::VPBLENDMBZ128rmk; break;
1697 case X86::VMOVDQU8Z256rmk: Opc = X86::VPBLENDMBZ256rmk; break;
1698 case X86::VMOVDQU8Zrmk: Opc = X86::VPBLENDMBZrmk; break;
1699 case X86::VMOVDQU16Z128rmk: Opc = X86::VPBLENDMWZ128rmk; break;
1700 case X86::VMOVDQU16Z256rmk: Opc = X86::VPBLENDMWZ256rmk; break;
1701 case X86::VMOVDQU16Zrmk: Opc = X86::VPBLENDMWZrmk; break;
1702 case X86::VMOVDQU32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
1703 case X86::VMOVDQU32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
1704 case X86::VMOVDQU32Zrmk: Opc = X86::VPBLENDMDZrmk; break;
1705 case X86::VMOVDQU64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
1706 case X86::VMOVDQU64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
1707 case X86::VMOVDQU64Zrmk: Opc = X86::VPBLENDMQZrmk; break;
1708 case X86::VMOVUPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break;
1709 case X86::VMOVUPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break;
1710 case X86::VMOVUPDZrmk: Opc = X86::VBLENDMPDZrmk; break;
1711 case X86::VMOVUPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break;
1712 case X86::VMOVUPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break;
1713 case X86::VMOVUPSZrmk: Opc = X86::VBLENDMPSZrmk; break;
1714 case X86::VMOVDQA32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
1715 case X86::VMOVDQA32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
1716 case X86::VMOVDQA32Zrmk: Opc = X86::VPBLENDMDZrmk; break;
1717 case X86::VMOVDQA64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
1718 case X86::VMOVDQA64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
1719 case X86::VMOVDQA64Zrmk: Opc = X86::VPBLENDMQZrmk; break;
1720 case X86::VMOVAPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break;
1721 case X86::VMOVAPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break;
1722 case X86::VMOVAPDZrmk: Opc = X86::VBLENDMPDZrmk; break;
1723 case X86::VMOVAPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break;
1724 case X86::VMOVAPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break;
1725 case X86::VMOVAPSZrmk: Opc = X86::VBLENDMPSZrmk; break;
1726 case X86::VBROADCASTSDZ256rmk: Opc = X86::VBLENDMPDZ256rmbk; break;
1727 case X86::VBROADCASTSDZrmk: Opc = X86::VBLENDMPDZrmbk; break;
1728 case X86::VBROADCASTSSZ128rmk: Opc = X86::VBLENDMPSZ128rmbk; break;
1729 case X86::VBROADCASTSSZ256rmk: Opc = X86::VBLENDMPSZ256rmbk; break;
1730 case X86::VBROADCASTSSZrmk: Opc = X86::VBLENDMPSZrmbk; break;
1731 case X86::VPBROADCASTDZ128rmk: Opc = X86::VPBLENDMDZ128rmbk; break;
1732 case X86::VPBROADCASTDZ256rmk: Opc = X86::VPBLENDMDZ256rmbk; break;
1733 case X86::VPBROADCASTDZrmk: Opc = X86::VPBLENDMDZrmbk; break;
1734 case X86::VPBROADCASTQZ128rmk: Opc = X86::VPBLENDMQZ128rmbk; break;
1735 case X86::VPBROADCASTQZ256rmk: Opc = X86::VPBLENDMQZ256rmbk; break;
1736 case X86::VPBROADCASTQZrmk: Opc = X86::VPBLENDMQZrmbk; break;
1737 }
1738
1739 NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1740 .add(Dest)
1741 .add(MI.getOperand(2))
1742 .add(Src)
1743 .add(MI.getOperand(3))
1744 .add(MI.getOperand(4))
1745 .add(MI.getOperand(5))
1746 .add(MI.getOperand(6))
1747 .add(MI.getOperand(7));
1748 break;
1749 }
1750
1751 case X86::VMOVDQU8Z128rrk:
1752 case X86::VMOVDQU8Z256rrk:
1753 case X86::VMOVDQU8Zrrk:
1754 case X86::VMOVDQU16Z128rrk:
1755 case X86::VMOVDQU16Z256rrk:
1756 case X86::VMOVDQU16Zrrk:
1757 case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk:
1758 case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk:
1759 case X86::VMOVDQU32Zrrk: case X86::VMOVDQA32Zrrk:
1760 case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk:
1761 case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk:
1762 case X86::VMOVDQU64Zrrk: case X86::VMOVDQA64Zrrk:
1763 case X86::VMOVUPDZ128rrk: case X86::VMOVAPDZ128rrk:
1764 case X86::VMOVUPDZ256rrk: case X86::VMOVAPDZ256rrk:
1765 case X86::VMOVUPDZrrk: case X86::VMOVAPDZrrk:
1766 case X86::VMOVUPSZ128rrk: case X86::VMOVAPSZ128rrk:
1767 case X86::VMOVUPSZ256rrk: case X86::VMOVAPSZ256rrk:
1768 case X86::VMOVUPSZrrk: case X86::VMOVAPSZrrk: {
1769 unsigned Opc;
1770 switch (MIOpc) {
1771 default: llvm_unreachable("Unreachable!");
1772 case X86::VMOVDQU8Z128rrk: Opc = X86::VPBLENDMBZ128rrk; break;
1773 case X86::VMOVDQU8Z256rrk: Opc = X86::VPBLENDMBZ256rrk; break;
1774 case X86::VMOVDQU8Zrrk: Opc = X86::VPBLENDMBZrrk; break;
1775 case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break;
1776 case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break;
1777 case X86::VMOVDQU16Zrrk: Opc = X86::VPBLENDMWZrrk; break;
1778 case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
1779 case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
1780 case X86::VMOVDQU32Zrrk: Opc = X86::VPBLENDMDZrrk; break;
1781 case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
1782 case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
1783 case X86::VMOVDQU64Zrrk: Opc = X86::VPBLENDMQZrrk; break;
1784 case X86::VMOVUPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break;
1785 case X86::VMOVUPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break;
1786 case X86::VMOVUPDZrrk: Opc = X86::VBLENDMPDZrrk; break;
1787 case X86::VMOVUPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break;
1788 case X86::VMOVUPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break;
1789 case X86::VMOVUPSZrrk: Opc = X86::VBLENDMPSZrrk; break;
1790 case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
1791 case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
1792 case X86::VMOVDQA32Zrrk: Opc = X86::VPBLENDMDZrrk; break;
1793 case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
1794 case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
1795 case X86::VMOVDQA64Zrrk: Opc = X86::VPBLENDMQZrrk; break;
1796 case X86::VMOVAPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break;
1797 case X86::VMOVAPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break;
1798 case X86::VMOVAPDZrrk: Opc = X86::VBLENDMPDZrrk; break;
1799 case X86::VMOVAPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break;
1800 case X86::VMOVAPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break;
1801 case X86::VMOVAPSZrrk: Opc = X86::VBLENDMPSZrrk; break;
1802 }
1803
1804 NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1805 .add(Dest)
1806 .add(MI.getOperand(2))
1807 .add(Src)
1808 .add(MI.getOperand(3));
1809 break;
1810 }
1811 }
1812
1813 if (!NewMI) return nullptr;
1814
1815 if (LV) { // Update live variables
1816 if (Src.isKill())
1817 LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
1818 if (Dest.isDead())
1819 LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
1820 }
1821
1822 MachineBasicBlock &MBB = *MI.getParent();
1823 MBB.insert(MI.getIterator(), NewMI); // Insert the new inst
1824 return NewMI;
1825 }
1826
1827 /// This determines which of three possible cases of a three source commute
1828 /// the source indexes correspond to taking into account any mask operands.
1829 /// All prevents commuting a passthru operand. Returns -1 if the commute isn't
1830 /// possible.
1831 /// Case 0 - Possible to commute the first and second operands.
1832 /// Case 1 - Possible to commute the first and third operands.
1833 /// Case 2 - Possible to commute the second and third operands.
getThreeSrcCommuteCase(uint64_t TSFlags,unsigned SrcOpIdx1,unsigned SrcOpIdx2)1834 static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
1835 unsigned SrcOpIdx2) {
1836 // Put the lowest index to SrcOpIdx1 to simplify the checks below.
1837 if (SrcOpIdx1 > SrcOpIdx2)
1838 std::swap(SrcOpIdx1, SrcOpIdx2);
1839
1840 unsigned Op1 = 1, Op2 = 2, Op3 = 3;
1841 if (X86II::isKMasked(TSFlags)) {
1842 Op2++;
1843 Op3++;
1844 }
1845
1846 if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
1847 return 0;
1848 if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
1849 return 1;
1850 if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
1851 return 2;
1852 llvm_unreachable("Unknown three src commute case.");
1853 }
1854
getFMA3OpcodeToCommuteOperands(const MachineInstr & MI,unsigned SrcOpIdx1,unsigned SrcOpIdx2,const X86InstrFMA3Group & FMA3Group) const1855 unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
1856 const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
1857 const X86InstrFMA3Group &FMA3Group) const {
1858
1859 unsigned Opc = MI.getOpcode();
1860
1861 // TODO: Commuting the 1st operand of FMA*_Int requires some additional
1862 // analysis. The commute optimization is legal only if all users of FMA*_Int
1863 // use only the lowest element of the FMA*_Int instruction. Such analysis are
1864 // not implemented yet. So, just return 0 in that case.
1865 // When such analysis are available this place will be the right place for
1866 // calling it.
1867 assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
1868 "Intrinsic instructions can't commute operand 1");
1869
1870 // Determine which case this commute is or if it can't be done.
1871 unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
1872 SrcOpIdx2);
1873 assert(Case < 3 && "Unexpected case number!");
1874
1875 // Define the FMA forms mapping array that helps to map input FMA form
1876 // to output FMA form to preserve the operation semantics after
1877 // commuting the operands.
1878 const unsigned Form132Index = 0;
1879 const unsigned Form213Index = 1;
1880 const unsigned Form231Index = 2;
1881 static const unsigned FormMapping[][3] = {
1882 // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
1883 // FMA132 A, C, b; ==> FMA231 C, A, b;
1884 // FMA213 B, A, c; ==> FMA213 A, B, c;
1885 // FMA231 C, A, b; ==> FMA132 A, C, b;
1886 { Form231Index, Form213Index, Form132Index },
1887 // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
1888 // FMA132 A, c, B; ==> FMA132 B, c, A;
1889 // FMA213 B, a, C; ==> FMA231 C, a, B;
1890 // FMA231 C, a, B; ==> FMA213 B, a, C;
1891 { Form132Index, Form231Index, Form213Index },
1892 // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
1893 // FMA132 a, C, B; ==> FMA213 a, B, C;
1894 // FMA213 b, A, C; ==> FMA132 b, C, A;
1895 // FMA231 c, A, B; ==> FMA231 c, B, A;
1896 { Form213Index, Form132Index, Form231Index }
1897 };
1898
1899 unsigned FMAForms[3];
1900 FMAForms[0] = FMA3Group.get132Opcode();
1901 FMAForms[1] = FMA3Group.get213Opcode();
1902 FMAForms[2] = FMA3Group.get231Opcode();
1903 unsigned FormIndex;
1904 for (FormIndex = 0; FormIndex < 3; FormIndex++)
1905 if (Opc == FMAForms[FormIndex])
1906 break;
1907
1908 // Everything is ready, just adjust the FMA opcode and return it.
1909 FormIndex = FormMapping[Case][FormIndex];
1910 return FMAForms[FormIndex];
1911 }
1912
commuteVPTERNLOG(MachineInstr & MI,unsigned SrcOpIdx1,unsigned SrcOpIdx2)1913 static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
1914 unsigned SrcOpIdx2) {
1915 // Determine which case this commute is or if it can't be done.
1916 unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
1917 SrcOpIdx2);
1918 assert(Case < 3 && "Unexpected case value!");
1919
1920 // For each case we need to swap two pairs of bits in the final immediate.
1921 static const uint8_t SwapMasks[3][4] = {
1922 { 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5.
1923 { 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6.
1924 { 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6.
1925 };
1926
1927 uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm();
1928 // Clear out the bits we are swapping.
1929 uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
1930 SwapMasks[Case][2] | SwapMasks[Case][3]);
1931 // If the immediate had a bit of the pair set, then set the opposite bit.
1932 if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1];
1933 if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0];
1934 if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3];
1935 if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2];
1936 MI.getOperand(MI.getNumOperands()-1).setImm(NewImm);
1937 }
1938
1939 // Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
1940 // commuted.
isCommutableVPERMV3Instruction(unsigned Opcode)1941 static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
1942 #define VPERM_CASES(Suffix) \
1943 case X86::VPERMI2##Suffix##128rr: case X86::VPERMT2##Suffix##128rr: \
1944 case X86::VPERMI2##Suffix##256rr: case X86::VPERMT2##Suffix##256rr: \
1945 case X86::VPERMI2##Suffix##rr: case X86::VPERMT2##Suffix##rr: \
1946 case X86::VPERMI2##Suffix##128rm: case X86::VPERMT2##Suffix##128rm: \
1947 case X86::VPERMI2##Suffix##256rm: case X86::VPERMT2##Suffix##256rm: \
1948 case X86::VPERMI2##Suffix##rm: case X86::VPERMT2##Suffix##rm: \
1949 case X86::VPERMI2##Suffix##128rrkz: case X86::VPERMT2##Suffix##128rrkz: \
1950 case X86::VPERMI2##Suffix##256rrkz: case X86::VPERMT2##Suffix##256rrkz: \
1951 case X86::VPERMI2##Suffix##rrkz: case X86::VPERMT2##Suffix##rrkz: \
1952 case X86::VPERMI2##Suffix##128rmkz: case X86::VPERMT2##Suffix##128rmkz: \
1953 case X86::VPERMI2##Suffix##256rmkz: case X86::VPERMT2##Suffix##256rmkz: \
1954 case X86::VPERMI2##Suffix##rmkz: case X86::VPERMT2##Suffix##rmkz:
1955
1956 #define VPERM_CASES_BROADCAST(Suffix) \
1957 VPERM_CASES(Suffix) \
1958 case X86::VPERMI2##Suffix##128rmb: case X86::VPERMT2##Suffix##128rmb: \
1959 case X86::VPERMI2##Suffix##256rmb: case X86::VPERMT2##Suffix##256rmb: \
1960 case X86::VPERMI2##Suffix##rmb: case X86::VPERMT2##Suffix##rmb: \
1961 case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \
1962 case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \
1963 case X86::VPERMI2##Suffix##rmbkz: case X86::VPERMT2##Suffix##rmbkz:
1964
1965 switch (Opcode) {
1966 default: return false;
1967 VPERM_CASES(B)
1968 VPERM_CASES_BROADCAST(D)
1969 VPERM_CASES_BROADCAST(PD)
1970 VPERM_CASES_BROADCAST(PS)
1971 VPERM_CASES_BROADCAST(Q)
1972 VPERM_CASES(W)
1973 return true;
1974 }
1975 #undef VPERM_CASES_BROADCAST
1976 #undef VPERM_CASES
1977 }
1978
1979 // Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
1980 // from the I opcode to the T opcode and vice versa.
getCommutedVPERMV3Opcode(unsigned Opcode)1981 static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
1982 #define VPERM_CASES(Orig, New) \
1983 case X86::Orig##128rr: return X86::New##128rr; \
1984 case X86::Orig##128rrkz: return X86::New##128rrkz; \
1985 case X86::Orig##128rm: return X86::New##128rm; \
1986 case X86::Orig##128rmkz: return X86::New##128rmkz; \
1987 case X86::Orig##256rr: return X86::New##256rr; \
1988 case X86::Orig##256rrkz: return X86::New##256rrkz; \
1989 case X86::Orig##256rm: return X86::New##256rm; \
1990 case X86::Orig##256rmkz: return X86::New##256rmkz; \
1991 case X86::Orig##rr: return X86::New##rr; \
1992 case X86::Orig##rrkz: return X86::New##rrkz; \
1993 case X86::Orig##rm: return X86::New##rm; \
1994 case X86::Orig##rmkz: return X86::New##rmkz;
1995
1996 #define VPERM_CASES_BROADCAST(Orig, New) \
1997 VPERM_CASES(Orig, New) \
1998 case X86::Orig##128rmb: return X86::New##128rmb; \
1999 case X86::Orig##128rmbkz: return X86::New##128rmbkz; \
2000 case X86::Orig##256rmb: return X86::New##256rmb; \
2001 case X86::Orig##256rmbkz: return X86::New##256rmbkz; \
2002 case X86::Orig##rmb: return X86::New##rmb; \
2003 case X86::Orig##rmbkz: return X86::New##rmbkz;
2004
2005 switch (Opcode) {
2006 VPERM_CASES(VPERMI2B, VPERMT2B)
2007 VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D)
2008 VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
2009 VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
2010 VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q)
2011 VPERM_CASES(VPERMI2W, VPERMT2W)
2012 VPERM_CASES(VPERMT2B, VPERMI2B)
2013 VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D)
2014 VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
2015 VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
2016 VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q)
2017 VPERM_CASES(VPERMT2W, VPERMI2W)
2018 }
2019
2020 llvm_unreachable("Unreachable!");
2021 #undef VPERM_CASES_BROADCAST
2022 #undef VPERM_CASES
2023 }
2024
commuteInstructionImpl(MachineInstr & MI,bool NewMI,unsigned OpIdx1,unsigned OpIdx2) const2025 MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
2026 unsigned OpIdx1,
2027 unsigned OpIdx2) const {
2028 auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
2029 if (NewMI)
2030 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
2031 return MI;
2032 };
2033
2034 switch (MI.getOpcode()) {
2035 case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
2036 case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
2037 case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
2038 case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
2039 case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
2040 case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
2041 unsigned Opc;
2042 unsigned Size;
2043 switch (MI.getOpcode()) {
2044 default: llvm_unreachable("Unreachable!");
2045 case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
2046 case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
2047 case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
2048 case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
2049 case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
2050 case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
2051 }
2052 unsigned Amt = MI.getOperand(3).getImm();
2053 auto &WorkingMI = cloneIfNew(MI);
2054 WorkingMI.setDesc(get(Opc));
2055 WorkingMI.getOperand(3).setImm(Size - Amt);
2056 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2057 OpIdx1, OpIdx2);
2058 }
2059 case X86::PFSUBrr:
2060 case X86::PFSUBRrr: {
2061 // PFSUB x, y: x = x - y
2062 // PFSUBR x, y: x = y - x
2063 unsigned Opc =
2064 (X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr);
2065 auto &WorkingMI = cloneIfNew(MI);
2066 WorkingMI.setDesc(get(Opc));
2067 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2068 OpIdx1, OpIdx2);
2069 }
2070 case X86::BLENDPDrri:
2071 case X86::BLENDPSrri:
2072 case X86::VBLENDPDrri:
2073 case X86::VBLENDPSrri:
2074 // If we're optimizing for size, try to use MOVSD/MOVSS.
2075 if (MI.getParent()->getParent()->getFunction().hasOptSize()) {
2076 unsigned Mask, Opc;
2077 switch (MI.getOpcode()) {
2078 default: llvm_unreachable("Unreachable!");
2079 case X86::BLENDPDrri: Opc = X86::MOVSDrr; Mask = 0x03; break;
2080 case X86::BLENDPSrri: Opc = X86::MOVSSrr; Mask = 0x0F; break;
2081 case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break;
2082 case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break;
2083 }
2084 if ((MI.getOperand(3).getImm() ^ Mask) == 1) {
2085 auto &WorkingMI = cloneIfNew(MI);
2086 WorkingMI.setDesc(get(Opc));
2087 WorkingMI.RemoveOperand(3);
2088 return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
2089 /*NewMI=*/false,
2090 OpIdx1, OpIdx2);
2091 }
2092 }
2093 LLVM_FALLTHROUGH;
2094 case X86::PBLENDWrri:
2095 case X86::VBLENDPDYrri:
2096 case X86::VBLENDPSYrri:
2097 case X86::VPBLENDDrri:
2098 case X86::VPBLENDWrri:
2099 case X86::VPBLENDDYrri:
2100 case X86::VPBLENDWYrri:{
2101 int8_t Mask;
2102 switch (MI.getOpcode()) {
2103 default: llvm_unreachable("Unreachable!");
2104 case X86::BLENDPDrri: Mask = (int8_t)0x03; break;
2105 case X86::BLENDPSrri: Mask = (int8_t)0x0F; break;
2106 case X86::PBLENDWrri: Mask = (int8_t)0xFF; break;
2107 case X86::VBLENDPDrri: Mask = (int8_t)0x03; break;
2108 case X86::VBLENDPSrri: Mask = (int8_t)0x0F; break;
2109 case X86::VBLENDPDYrri: Mask = (int8_t)0x0F; break;
2110 case X86::VBLENDPSYrri: Mask = (int8_t)0xFF; break;
2111 case X86::VPBLENDDrri: Mask = (int8_t)0x0F; break;
2112 case X86::VPBLENDWrri: Mask = (int8_t)0xFF; break;
2113 case X86::VPBLENDDYrri: Mask = (int8_t)0xFF; break;
2114 case X86::VPBLENDWYrri: Mask = (int8_t)0xFF; break;
2115 }
2116 // Only the least significant bits of Imm are used.
2117 // Using int8_t to ensure it will be sign extended to the int64_t that
2118 // setImm takes in order to match isel behavior.
2119 int8_t Imm = MI.getOperand(3).getImm() & Mask;
2120 auto &WorkingMI = cloneIfNew(MI);
2121 WorkingMI.getOperand(3).setImm(Mask ^ Imm);
2122 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2123 OpIdx1, OpIdx2);
2124 }
2125 case X86::INSERTPSrr:
2126 case X86::VINSERTPSrr:
2127 case X86::VINSERTPSZrr: {
2128 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
2129 unsigned ZMask = Imm & 15;
2130 unsigned DstIdx = (Imm >> 4) & 3;
2131 unsigned SrcIdx = (Imm >> 6) & 3;
2132
2133 // We can commute insertps if we zero 2 of the elements, the insertion is
2134 // "inline" and we don't override the insertion with a zero.
2135 if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
2136 countPopulation(ZMask) == 2) {
2137 unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15);
2138 assert(AltIdx < 4 && "Illegal insertion index");
2139 unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
2140 auto &WorkingMI = cloneIfNew(MI);
2141 WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm);
2142 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2143 OpIdx1, OpIdx2);
2144 }
2145 return nullptr;
2146 }
2147 case X86::MOVSDrr:
2148 case X86::MOVSSrr:
2149 case X86::VMOVSDrr:
2150 case X86::VMOVSSrr:{
2151 // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
2152 if (Subtarget.hasSSE41()) {
2153 unsigned Mask, Opc;
2154 switch (MI.getOpcode()) {
2155 default: llvm_unreachable("Unreachable!");
2156 case X86::MOVSDrr: Opc = X86::BLENDPDrri; Mask = 0x02; break;
2157 case X86::MOVSSrr: Opc = X86::BLENDPSrri; Mask = 0x0E; break;
2158 case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break;
2159 case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break;
2160 }
2161
2162 auto &WorkingMI = cloneIfNew(MI);
2163 WorkingMI.setDesc(get(Opc));
2164 WorkingMI.addOperand(MachineOperand::CreateImm(Mask));
2165 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2166 OpIdx1, OpIdx2);
2167 }
2168
2169 // Convert to SHUFPD.
2170 assert(MI.getOpcode() == X86::MOVSDrr &&
2171 "Can only commute MOVSDrr without SSE4.1");
2172
2173 auto &WorkingMI = cloneIfNew(MI);
2174 WorkingMI.setDesc(get(X86::SHUFPDrri));
2175 WorkingMI.addOperand(MachineOperand::CreateImm(0x02));
2176 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2177 OpIdx1, OpIdx2);
2178 }
2179 case X86::SHUFPDrri: {
2180 // Commute to MOVSD.
2181 assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
2182 auto &WorkingMI = cloneIfNew(MI);
2183 WorkingMI.setDesc(get(X86::MOVSDrr));
2184 WorkingMI.RemoveOperand(3);
2185 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2186 OpIdx1, OpIdx2);
2187 }
2188 case X86::PCLMULQDQrr:
2189 case X86::VPCLMULQDQrr:
2190 case X86::VPCLMULQDQYrr:
2191 case X86::VPCLMULQDQZrr:
2192 case X86::VPCLMULQDQZ128rr:
2193 case X86::VPCLMULQDQZ256rr: {
2194 // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
2195 // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
2196 unsigned Imm = MI.getOperand(3).getImm();
2197 unsigned Src1Hi = Imm & 0x01;
2198 unsigned Src2Hi = Imm & 0x10;
2199 auto &WorkingMI = cloneIfNew(MI);
2200 WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
2201 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2202 OpIdx1, OpIdx2);
2203 }
2204 case X86::VPCMPBZ128rri: case X86::VPCMPUBZ128rri:
2205 case X86::VPCMPBZ256rri: case X86::VPCMPUBZ256rri:
2206 case X86::VPCMPBZrri: case X86::VPCMPUBZrri:
2207 case X86::VPCMPDZ128rri: case X86::VPCMPUDZ128rri:
2208 case X86::VPCMPDZ256rri: case X86::VPCMPUDZ256rri:
2209 case X86::VPCMPDZrri: case X86::VPCMPUDZrri:
2210 case X86::VPCMPQZ128rri: case X86::VPCMPUQZ128rri:
2211 case X86::VPCMPQZ256rri: case X86::VPCMPUQZ256rri:
2212 case X86::VPCMPQZrri: case X86::VPCMPUQZrri:
2213 case X86::VPCMPWZ128rri: case X86::VPCMPUWZ128rri:
2214 case X86::VPCMPWZ256rri: case X86::VPCMPUWZ256rri:
2215 case X86::VPCMPWZrri: case X86::VPCMPUWZrri:
2216 case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik:
2217 case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik:
2218 case X86::VPCMPBZrrik: case X86::VPCMPUBZrrik:
2219 case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik:
2220 case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik:
2221 case X86::VPCMPDZrrik: case X86::VPCMPUDZrrik:
2222 case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik:
2223 case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik:
2224 case X86::VPCMPQZrrik: case X86::VPCMPUQZrrik:
2225 case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik:
2226 case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik:
2227 case X86::VPCMPWZrrik: case X86::VPCMPUWZrrik: {
2228 // Flip comparison mode immediate (if necessary).
2229 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7;
2230 Imm = X86::getSwappedVPCMPImm(Imm);
2231 auto &WorkingMI = cloneIfNew(MI);
2232 WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm);
2233 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2234 OpIdx1, OpIdx2);
2235 }
2236 case X86::VPCOMBri: case X86::VPCOMUBri:
2237 case X86::VPCOMDri: case X86::VPCOMUDri:
2238 case X86::VPCOMQri: case X86::VPCOMUQri:
2239 case X86::VPCOMWri: case X86::VPCOMUWri: {
2240 // Flip comparison mode immediate (if necessary).
2241 unsigned Imm = MI.getOperand(3).getImm() & 0x7;
2242 Imm = X86::getSwappedVPCOMImm(Imm);
2243 auto &WorkingMI = cloneIfNew(MI);
2244 WorkingMI.getOperand(3).setImm(Imm);
2245 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2246 OpIdx1, OpIdx2);
2247 }
2248 case X86::VCMPSDZrr:
2249 case X86::VCMPSSZrr:
2250 case X86::VCMPPDZrri:
2251 case X86::VCMPPSZrri:
2252 case X86::VCMPSHZrr:
2253 case X86::VCMPPHZrri:
2254 case X86::VCMPPHZ128rri:
2255 case X86::VCMPPHZ256rri:
2256 case X86::VCMPPDZ128rri:
2257 case X86::VCMPPSZ128rri:
2258 case X86::VCMPPDZ256rri:
2259 case X86::VCMPPSZ256rri:
2260 case X86::VCMPPDZrrik:
2261 case X86::VCMPPSZrrik:
2262 case X86::VCMPPDZ128rrik:
2263 case X86::VCMPPSZ128rrik:
2264 case X86::VCMPPDZ256rrik:
2265 case X86::VCMPPSZ256rrik: {
2266 unsigned Imm =
2267 MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f;
2268 Imm = X86::getSwappedVCMPImm(Imm);
2269 auto &WorkingMI = cloneIfNew(MI);
2270 WorkingMI.getOperand(MI.getNumExplicitOperands() - 1).setImm(Imm);
2271 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2272 OpIdx1, OpIdx2);
2273 }
2274 case X86::VPERM2F128rr:
2275 case X86::VPERM2I128rr: {
2276 // Flip permute source immediate.
2277 // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
2278 // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
2279 int8_t Imm = MI.getOperand(3).getImm() & 0xFF;
2280 auto &WorkingMI = cloneIfNew(MI);
2281 WorkingMI.getOperand(3).setImm(Imm ^ 0x22);
2282 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2283 OpIdx1, OpIdx2);
2284 }
2285 case X86::MOVHLPSrr:
2286 case X86::UNPCKHPDrr:
2287 case X86::VMOVHLPSrr:
2288 case X86::VUNPCKHPDrr:
2289 case X86::VMOVHLPSZrr:
2290 case X86::VUNPCKHPDZ128rr: {
2291 assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");
2292
2293 unsigned Opc = MI.getOpcode();
2294 switch (Opc) {
2295 default: llvm_unreachable("Unreachable!");
2296 case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break;
2297 case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break;
2298 case X86::VMOVHLPSrr: Opc = X86::VUNPCKHPDrr; break;
2299 case X86::VUNPCKHPDrr: Opc = X86::VMOVHLPSrr; break;
2300 case X86::VMOVHLPSZrr: Opc = X86::VUNPCKHPDZ128rr; break;
2301 case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr; break;
2302 }
2303 auto &WorkingMI = cloneIfNew(MI);
2304 WorkingMI.setDesc(get(Opc));
2305 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2306 OpIdx1, OpIdx2);
2307 }
2308 case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: {
2309 auto &WorkingMI = cloneIfNew(MI);
2310 unsigned OpNo = MI.getDesc().getNumOperands() - 1;
2311 X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm());
2312 WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC));
2313 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2314 OpIdx1, OpIdx2);
2315 }
2316 case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi:
2317 case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi:
2318 case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi:
2319 case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi:
2320 case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi:
2321 case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi:
2322 case X86::VPTERNLOGDZrrik:
2323 case X86::VPTERNLOGDZ128rrik:
2324 case X86::VPTERNLOGDZ256rrik:
2325 case X86::VPTERNLOGQZrrik:
2326 case X86::VPTERNLOGQZ128rrik:
2327 case X86::VPTERNLOGQZ256rrik:
2328 case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz:
2329 case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
2330 case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
2331 case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz:
2332 case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
2333 case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
2334 case X86::VPTERNLOGDZ128rmbi:
2335 case X86::VPTERNLOGDZ256rmbi:
2336 case X86::VPTERNLOGDZrmbi:
2337 case X86::VPTERNLOGQZ128rmbi:
2338 case X86::VPTERNLOGQZ256rmbi:
2339 case X86::VPTERNLOGQZrmbi:
2340 case X86::VPTERNLOGDZ128rmbikz:
2341 case X86::VPTERNLOGDZ256rmbikz:
2342 case X86::VPTERNLOGDZrmbikz:
2343 case X86::VPTERNLOGQZ128rmbikz:
2344 case X86::VPTERNLOGQZ256rmbikz:
2345 case X86::VPTERNLOGQZrmbikz: {
2346 auto &WorkingMI = cloneIfNew(MI);
2347 commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2);
2348 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2349 OpIdx1, OpIdx2);
2350 }
2351 default: {
2352 if (isCommutableVPERMV3Instruction(MI.getOpcode())) {
2353 unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode());
2354 auto &WorkingMI = cloneIfNew(MI);
2355 WorkingMI.setDesc(get(Opc));
2356 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2357 OpIdx1, OpIdx2);
2358 }
2359
2360 const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
2361 MI.getDesc().TSFlags);
2362 if (FMA3Group) {
2363 unsigned Opc =
2364 getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group);
2365 auto &WorkingMI = cloneIfNew(MI);
2366 WorkingMI.setDesc(get(Opc));
2367 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2368 OpIdx1, OpIdx2);
2369 }
2370
2371 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2372 }
2373 }
2374 }
2375
2376 bool
findThreeSrcCommutedOpIndices(const MachineInstr & MI,unsigned & SrcOpIdx1,unsigned & SrcOpIdx2,bool IsIntrinsic) const2377 X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
2378 unsigned &SrcOpIdx1,
2379 unsigned &SrcOpIdx2,
2380 bool IsIntrinsic) const {
2381 uint64_t TSFlags = MI.getDesc().TSFlags;
2382
2383 unsigned FirstCommutableVecOp = 1;
2384 unsigned LastCommutableVecOp = 3;
2385 unsigned KMaskOp = -1U;
2386 if (X86II::isKMasked(TSFlags)) {
2387 // For k-zero-masked operations it is Ok to commute the first vector
2388 // operand. Unless this is an intrinsic instruction.
2389 // For regular k-masked operations a conservative choice is done as the
2390 // elements of the first vector operand, for which the corresponding bit
2391 // in the k-mask operand is set to 0, are copied to the result of the
2392 // instruction.
2393 // TODO/FIXME: The commute still may be legal if it is known that the
2394 // k-mask operand is set to either all ones or all zeroes.
2395 // It is also Ok to commute the 1st operand if all users of MI use only
2396 // the elements enabled by the k-mask operand. For example,
2397 // v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i]
2398 // : v1[i];
2399 // VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 ->
2400 // // Ok, to commute v1 in FMADD213PSZrk.
2401
2402 // The k-mask operand has index = 2 for masked and zero-masked operations.
2403 KMaskOp = 2;
2404
2405 // The operand with index = 1 is used as a source for those elements for
2406 // which the corresponding bit in the k-mask is set to 0.
2407 if (X86II::isKMergeMasked(TSFlags) || IsIntrinsic)
2408 FirstCommutableVecOp = 3;
2409
2410 LastCommutableVecOp++;
2411 } else if (IsIntrinsic) {
2412 // Commuting the first operand of an intrinsic instruction isn't possible
2413 // unless we can prove that only the lowest element of the result is used.
2414 FirstCommutableVecOp = 2;
2415 }
2416
2417 if (isMem(MI, LastCommutableVecOp))
2418 LastCommutableVecOp--;
2419
2420 // Only the first RegOpsNum operands are commutable.
2421 // Also, the value 'CommuteAnyOperandIndex' is valid here as it means
2422 // that the operand is not specified/fixed.
2423 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
2424 (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
2425 SrcOpIdx1 == KMaskOp))
2426 return false;
2427 if (SrcOpIdx2 != CommuteAnyOperandIndex &&
2428 (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
2429 SrcOpIdx2 == KMaskOp))
2430 return false;
2431
2432 // Look for two different register operands assumed to be commutable
2433 // regardless of the FMA opcode. The FMA opcode is adjusted later.
2434 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
2435 SrcOpIdx2 == CommuteAnyOperandIndex) {
2436 unsigned CommutableOpIdx2 = SrcOpIdx2;
2437
2438 // At least one of operands to be commuted is not specified and
2439 // this method is free to choose appropriate commutable operands.
2440 if (SrcOpIdx1 == SrcOpIdx2)
2441 // Both of operands are not fixed. By default set one of commutable
2442 // operands to the last register operand of the instruction.
2443 CommutableOpIdx2 = LastCommutableVecOp;
2444 else if (SrcOpIdx2 == CommuteAnyOperandIndex)
2445 // Only one of operands is not fixed.
2446 CommutableOpIdx2 = SrcOpIdx1;
2447
2448 // CommutableOpIdx2 is well defined now. Let's choose another commutable
2449 // operand and assign its index to CommutableOpIdx1.
2450 Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();
2451
2452 unsigned CommutableOpIdx1;
2453 for (CommutableOpIdx1 = LastCommutableVecOp;
2454 CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
2455 // Just ignore and skip the k-mask operand.
2456 if (CommutableOpIdx1 == KMaskOp)
2457 continue;
2458
2459 // The commuted operands must have different registers.
2460 // Otherwise, the commute transformation does not change anything and
2461 // is useless then.
2462 if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
2463 break;
2464 }
2465
2466 // No appropriate commutable operands were found.
2467 if (CommutableOpIdx1 < FirstCommutableVecOp)
2468 return false;
2469
2470 // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpidx2
2471 // to return those values.
2472 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2473 CommutableOpIdx1, CommutableOpIdx2))
2474 return false;
2475 }
2476
2477 return true;
2478 }
2479
findCommutedOpIndices(const MachineInstr & MI,unsigned & SrcOpIdx1,unsigned & SrcOpIdx2) const2480 bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2481 unsigned &SrcOpIdx1,
2482 unsigned &SrcOpIdx2) const {
2483 const MCInstrDesc &Desc = MI.getDesc();
2484 if (!Desc.isCommutable())
2485 return false;
2486
2487 switch (MI.getOpcode()) {
2488 case X86::CMPSDrr:
2489 case X86::CMPSSrr:
2490 case X86::CMPPDrri:
2491 case X86::CMPPSrri:
2492 case X86::VCMPSDrr:
2493 case X86::VCMPSSrr:
2494 case X86::VCMPPDrri:
2495 case X86::VCMPPSrri:
2496 case X86::VCMPPDYrri:
2497 case X86::VCMPPSYrri:
2498 case X86::VCMPSDZrr:
2499 case X86::VCMPSSZrr:
2500 case X86::VCMPPDZrri:
2501 case X86::VCMPPSZrri:
2502 case X86::VCMPSHZrr:
2503 case X86::VCMPPHZrri:
2504 case X86::VCMPPHZ128rri:
2505 case X86::VCMPPHZ256rri:
2506 case X86::VCMPPDZ128rri:
2507 case X86::VCMPPSZ128rri:
2508 case X86::VCMPPDZ256rri:
2509 case X86::VCMPPSZ256rri:
2510 case X86::VCMPPDZrrik:
2511 case X86::VCMPPSZrrik:
2512 case X86::VCMPPDZ128rrik:
2513 case X86::VCMPPSZ128rrik:
2514 case X86::VCMPPDZ256rrik:
2515 case X86::VCMPPSZ256rrik: {
2516 unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0;
2517
2518 // Float comparison can be safely commuted for
2519 // Ordered/Unordered/Equal/NotEqual tests
2520 unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7;
2521 switch (Imm) {
2522 default:
2523 // EVEX versions can be commuted.
2524 if ((Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX)
2525 break;
2526 return false;
2527 case 0x00: // EQUAL
2528 case 0x03: // UNORDERED
2529 case 0x04: // NOT EQUAL
2530 case 0x07: // ORDERED
2531 break;
2532 }
2533
2534 // The indices of the commutable operands are 1 and 2 (or 2 and 3
2535 // when masked).
2536 // Assign them to the returned operand indices here.
2537 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
2538 2 + OpOffset);
2539 }
2540 case X86::MOVSSrr:
2541 // X86::MOVSDrr is always commutable. MOVSS is only commutable if we can
2542 // form sse4.1 blend. We assume VMOVSSrr/VMOVSDrr is always commutable since
2543 // AVX implies sse4.1.
2544 if (Subtarget.hasSSE41())
2545 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2546 return false;
2547 case X86::SHUFPDrri:
2548 // We can commute this to MOVSD.
2549 if (MI.getOperand(3).getImm() == 0x02)
2550 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2551 return false;
2552 case X86::MOVHLPSrr:
2553 case X86::UNPCKHPDrr:
2554 case X86::VMOVHLPSrr:
2555 case X86::VUNPCKHPDrr:
2556 case X86::VMOVHLPSZrr:
2557 case X86::VUNPCKHPDZ128rr:
2558 if (Subtarget.hasSSE2())
2559 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2560 return false;
2561 case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi:
2562 case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi:
2563 case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi:
2564 case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi:
2565 case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi:
2566 case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi:
2567 case X86::VPTERNLOGDZrrik:
2568 case X86::VPTERNLOGDZ128rrik:
2569 case X86::VPTERNLOGDZ256rrik:
2570 case X86::VPTERNLOGQZrrik:
2571 case X86::VPTERNLOGQZ128rrik:
2572 case X86::VPTERNLOGQZ256rrik:
2573 case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz:
2574 case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
2575 case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
2576 case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz:
2577 case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
2578 case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
2579 case X86::VPTERNLOGDZ128rmbi:
2580 case X86::VPTERNLOGDZ256rmbi:
2581 case X86::VPTERNLOGDZrmbi:
2582 case X86::VPTERNLOGQZ128rmbi:
2583 case X86::VPTERNLOGQZ256rmbi:
2584 case X86::VPTERNLOGQZrmbi:
2585 case X86::VPTERNLOGDZ128rmbikz:
2586 case X86::VPTERNLOGDZ256rmbikz:
2587 case X86::VPTERNLOGDZrmbikz:
2588 case X86::VPTERNLOGQZ128rmbikz:
2589 case X86::VPTERNLOGQZ256rmbikz:
2590 case X86::VPTERNLOGQZrmbikz:
2591 return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2592 case X86::VPDPWSSDYrr:
2593 case X86::VPDPWSSDrr:
2594 case X86::VPDPWSSDSYrr:
2595 case X86::VPDPWSSDSrr:
2596 case X86::VPDPWSSDZ128r:
2597 case X86::VPDPWSSDZ128rk:
2598 case X86::VPDPWSSDZ128rkz:
2599 case X86::VPDPWSSDZ256r:
2600 case X86::VPDPWSSDZ256rk:
2601 case X86::VPDPWSSDZ256rkz:
2602 case X86::VPDPWSSDZr:
2603 case X86::VPDPWSSDZrk:
2604 case X86::VPDPWSSDZrkz:
2605 case X86::VPDPWSSDSZ128r:
2606 case X86::VPDPWSSDSZ128rk:
2607 case X86::VPDPWSSDSZ128rkz:
2608 case X86::VPDPWSSDSZ256r:
2609 case X86::VPDPWSSDSZ256rk:
2610 case X86::VPDPWSSDSZ256rkz:
2611 case X86::VPDPWSSDSZr:
2612 case X86::VPDPWSSDSZrk:
2613 case X86::VPDPWSSDSZrkz:
2614 case X86::VPMADD52HUQZ128r:
2615 case X86::VPMADD52HUQZ128rk:
2616 case X86::VPMADD52HUQZ128rkz:
2617 case X86::VPMADD52HUQZ256r:
2618 case X86::VPMADD52HUQZ256rk:
2619 case X86::VPMADD52HUQZ256rkz:
2620 case X86::VPMADD52HUQZr:
2621 case X86::VPMADD52HUQZrk:
2622 case X86::VPMADD52HUQZrkz:
2623 case X86::VPMADD52LUQZ128r:
2624 case X86::VPMADD52LUQZ128rk:
2625 case X86::VPMADD52LUQZ128rkz:
2626 case X86::VPMADD52LUQZ256r:
2627 case X86::VPMADD52LUQZ256rk:
2628 case X86::VPMADD52LUQZ256rkz:
2629 case X86::VPMADD52LUQZr:
2630 case X86::VPMADD52LUQZrk:
2631 case X86::VPMADD52LUQZrkz:
2632 case X86::VFMADDCPHZr:
2633 case X86::VFMADDCPHZrk:
2634 case X86::VFMADDCPHZrkz:
2635 case X86::VFMADDCPHZ128r:
2636 case X86::VFMADDCPHZ128rk:
2637 case X86::VFMADDCPHZ128rkz:
2638 case X86::VFMADDCPHZ256r:
2639 case X86::VFMADDCPHZ256rk:
2640 case X86::VFMADDCPHZ256rkz:
2641 case X86::VFMADDCSHZr:
2642 case X86::VFMADDCSHZrk:
2643 case X86::VFMADDCSHZrkz: {
2644 unsigned CommutableOpIdx1 = 2;
2645 unsigned CommutableOpIdx2 = 3;
2646 if (X86II::isKMasked(Desc.TSFlags)) {
2647 // Skip the mask register.
2648 ++CommutableOpIdx1;
2649 ++CommutableOpIdx2;
2650 }
2651 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2652 CommutableOpIdx1, CommutableOpIdx2))
2653 return false;
2654 if (!MI.getOperand(SrcOpIdx1).isReg() ||
2655 !MI.getOperand(SrcOpIdx2).isReg())
2656 // No idea.
2657 return false;
2658 return true;
2659 }
2660
2661 default:
2662 const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
2663 MI.getDesc().TSFlags);
2664 if (FMA3Group)
2665 return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2,
2666 FMA3Group->isIntrinsic());
2667
2668 // Handled masked instructions since we need to skip over the mask input
2669 // and the preserved input.
2670 if (X86II::isKMasked(Desc.TSFlags)) {
2671 // First assume that the first input is the mask operand and skip past it.
2672 unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1;
2673 unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2;
2674 // Check if the first input is tied. If there isn't one then we only
2675 // need to skip the mask operand which we did above.
2676 if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(),
2677 MCOI::TIED_TO) != -1)) {
2678 // If this is zero masking instruction with a tied operand, we need to
2679 // move the first index back to the first input since this must
2680 // be a 3 input instruction and we want the first two non-mask inputs.
2681 // Otherwise this is a 2 input instruction with a preserved input and
2682 // mask, so we need to move the indices to skip one more input.
2683 if (X86II::isKMergeMasked(Desc.TSFlags)) {
2684 ++CommutableOpIdx1;
2685 ++CommutableOpIdx2;
2686 } else {
2687 --CommutableOpIdx1;
2688 }
2689 }
2690
2691 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2692 CommutableOpIdx1, CommutableOpIdx2))
2693 return false;
2694
2695 if (!MI.getOperand(SrcOpIdx1).isReg() ||
2696 !MI.getOperand(SrcOpIdx2).isReg())
2697 // No idea.
2698 return false;
2699 return true;
2700 }
2701
2702 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2703 }
2704 return false;
2705 }
2706
isConvertibleLEA(MachineInstr * MI)2707 static bool isConvertibleLEA(MachineInstr *MI) {
2708 unsigned Opcode = MI->getOpcode();
2709 if (Opcode != X86::LEA32r && Opcode != X86::LEA64r &&
2710 Opcode != X86::LEA64_32r)
2711 return false;
2712
2713 const MachineOperand &Scale = MI->getOperand(1 + X86::AddrScaleAmt);
2714 const MachineOperand &Disp = MI->getOperand(1 + X86::AddrDisp);
2715 const MachineOperand &Segment = MI->getOperand(1 + X86::AddrSegmentReg);
2716
2717 if (Segment.getReg() != 0 || !Disp.isImm() || Disp.getImm() != 0 ||
2718 Scale.getImm() > 1)
2719 return false;
2720
2721 return true;
2722 }
2723
hasCommutePreference(MachineInstr & MI,bool & Commute) const2724 bool X86InstrInfo::hasCommutePreference(MachineInstr &MI, bool &Commute) const {
2725 // Currently we're interested in following sequence only.
2726 // r3 = lea r1, r2
2727 // r5 = add r3, r4
2728 // Both r3 and r4 are killed in add, we hope the add instruction has the
2729 // operand order
2730 // r5 = add r4, r3
2731 // So later in X86FixupLEAs the lea instruction can be rewritten as add.
2732 unsigned Opcode = MI.getOpcode();
2733 if (Opcode != X86::ADD32rr && Opcode != X86::ADD64rr)
2734 return false;
2735
2736 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2737 Register Reg1 = MI.getOperand(1).getReg();
2738 Register Reg2 = MI.getOperand(2).getReg();
2739
2740 // Check if Reg1 comes from LEA in the same MBB.
2741 if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg1)) {
2742 if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) {
2743 Commute = true;
2744 return true;
2745 }
2746 }
2747
2748 // Check if Reg2 comes from LEA in the same MBB.
2749 if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg2)) {
2750 if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) {
2751 Commute = false;
2752 return true;
2753 }
2754 }
2755
2756 return false;
2757 }
2758
getCondFromBranch(const MachineInstr & MI)2759 X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) {
2760 switch (MI.getOpcode()) {
2761 default: return X86::COND_INVALID;
2762 case X86::JCC_1:
2763 return static_cast<X86::CondCode>(
2764 MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2765 }
2766 }
2767
2768 /// Return condition code of a SETCC opcode.
getCondFromSETCC(const MachineInstr & MI)2769 X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) {
2770 switch (MI.getOpcode()) {
2771 default: return X86::COND_INVALID;
2772 case X86::SETCCr: case X86::SETCCm:
2773 return static_cast<X86::CondCode>(
2774 MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2775 }
2776 }
2777
2778 /// Return condition code of a CMov opcode.
getCondFromCMov(const MachineInstr & MI)2779 X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) {
2780 switch (MI.getOpcode()) {
2781 default: return X86::COND_INVALID;
2782 case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr:
2783 case X86::CMOV16rm: case X86::CMOV32rm: case X86::CMOV64rm:
2784 return static_cast<X86::CondCode>(
2785 MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2786 }
2787 }
2788
2789 /// Return the inverse of the specified condition,
2790 /// e.g. turning COND_E to COND_NE.
GetOppositeBranchCondition(X86::CondCode CC)2791 X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
2792 switch (CC) {
2793 default: llvm_unreachable("Illegal condition code!");
2794 case X86::COND_E: return X86::COND_NE;
2795 case X86::COND_NE: return X86::COND_E;
2796 case X86::COND_L: return X86::COND_GE;
2797 case X86::COND_LE: return X86::COND_G;
2798 case X86::COND_G: return X86::COND_LE;
2799 case X86::COND_GE: return X86::COND_L;
2800 case X86::COND_B: return X86::COND_AE;
2801 case X86::COND_BE: return X86::COND_A;
2802 case X86::COND_A: return X86::COND_BE;
2803 case X86::COND_AE: return X86::COND_B;
2804 case X86::COND_S: return X86::COND_NS;
2805 case X86::COND_NS: return X86::COND_S;
2806 case X86::COND_P: return X86::COND_NP;
2807 case X86::COND_NP: return X86::COND_P;
2808 case X86::COND_O: return X86::COND_NO;
2809 case X86::COND_NO: return X86::COND_O;
2810 case X86::COND_NE_OR_P: return X86::COND_E_AND_NP;
2811 case X86::COND_E_AND_NP: return X86::COND_NE_OR_P;
2812 }
2813 }
2814
2815 /// Assuming the flags are set by MI(a,b), return the condition code if we
2816 /// modify the instructions such that flags are set by MI(b,a).
getSwappedCondition(X86::CondCode CC)2817 static X86::CondCode getSwappedCondition(X86::CondCode CC) {
2818 switch (CC) {
2819 default: return X86::COND_INVALID;
2820 case X86::COND_E: return X86::COND_E;
2821 case X86::COND_NE: return X86::COND_NE;
2822 case X86::COND_L: return X86::COND_G;
2823 case X86::COND_LE: return X86::COND_GE;
2824 case X86::COND_G: return X86::COND_L;
2825 case X86::COND_GE: return X86::COND_LE;
2826 case X86::COND_B: return X86::COND_A;
2827 case X86::COND_BE: return X86::COND_AE;
2828 case X86::COND_A: return X86::COND_B;
2829 case X86::COND_AE: return X86::COND_BE;
2830 }
2831 }
2832
2833 std::pair<X86::CondCode, bool>
getX86ConditionCode(CmpInst::Predicate Predicate)2834 X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
2835 X86::CondCode CC = X86::COND_INVALID;
2836 bool NeedSwap = false;
2837 switch (Predicate) {
2838 default: break;
2839 // Floating-point Predicates
2840 case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
2841 case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH;
2842 case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
2843 case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH;
2844 case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
2845 case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH;
2846 case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
2847 case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH;
2848 case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
2849 case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
2850 case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
2851 case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
2852 case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH;
2853 case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
2854
2855 // Integer Predicates
2856 case CmpInst::ICMP_EQ: CC = X86::COND_E; break;
2857 case CmpInst::ICMP_NE: CC = X86::COND_NE; break;
2858 case CmpInst::ICMP_UGT: CC = X86::COND_A; break;
2859 case CmpInst::ICMP_UGE: CC = X86::COND_AE; break;
2860 case CmpInst::ICMP_ULT: CC = X86::COND_B; break;
2861 case CmpInst::ICMP_ULE: CC = X86::COND_BE; break;
2862 case CmpInst::ICMP_SGT: CC = X86::COND_G; break;
2863 case CmpInst::ICMP_SGE: CC = X86::COND_GE; break;
2864 case CmpInst::ICMP_SLT: CC = X86::COND_L; break;
2865 case CmpInst::ICMP_SLE: CC = X86::COND_LE; break;
2866 }
2867
2868 return std::make_pair(CC, NeedSwap);
2869 }
2870
2871 /// Return a setcc opcode based on whether it has memory operand.
getSETOpc(bool HasMemoryOperand)2872 unsigned X86::getSETOpc(bool HasMemoryOperand) {
2873 return HasMemoryOperand ? X86::SETCCr : X86::SETCCm;
2874 }
2875
2876 /// Return a cmov opcode for the given register size in bytes, and operand type.
getCMovOpcode(unsigned RegBytes,bool HasMemoryOperand)2877 unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) {
2878 switch(RegBytes) {
2879 default: llvm_unreachable("Illegal register size!");
2880 case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr;
2881 case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr;
2882 case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr;
2883 }
2884 }
2885
2886 /// Get the VPCMP immediate for the given condition.
getVPCMPImmForCond(ISD::CondCode CC)2887 unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {
2888 switch (CC) {
2889 default: llvm_unreachable("Unexpected SETCC condition");
2890 case ISD::SETNE: return 4;
2891 case ISD::SETEQ: return 0;
2892 case ISD::SETULT:
2893 case ISD::SETLT: return 1;
2894 case ISD::SETUGT:
2895 case ISD::SETGT: return 6;
2896 case ISD::SETUGE:
2897 case ISD::SETGE: return 5;
2898 case ISD::SETULE:
2899 case ISD::SETLE: return 2;
2900 }
2901 }
2902
2903 /// Get the VPCMP immediate if the operands are swapped.
getSwappedVPCMPImm(unsigned Imm)2904 unsigned X86::getSwappedVPCMPImm(unsigned Imm) {
2905 switch (Imm) {
2906 default: llvm_unreachable("Unreachable!");
2907 case 0x01: Imm = 0x06; break; // LT -> NLE
2908 case 0x02: Imm = 0x05; break; // LE -> NLT
2909 case 0x05: Imm = 0x02; break; // NLT -> LE
2910 case 0x06: Imm = 0x01; break; // NLE -> LT
2911 case 0x00: // EQ
2912 case 0x03: // FALSE
2913 case 0x04: // NE
2914 case 0x07: // TRUE
2915 break;
2916 }
2917
2918 return Imm;
2919 }
2920
2921 /// Get the VPCOM immediate if the operands are swapped.
getSwappedVPCOMImm(unsigned Imm)2922 unsigned X86::getSwappedVPCOMImm(unsigned Imm) {
2923 switch (Imm) {
2924 default: llvm_unreachable("Unreachable!");
2925 case 0x00: Imm = 0x02; break; // LT -> GT
2926 case 0x01: Imm = 0x03; break; // LE -> GE
2927 case 0x02: Imm = 0x00; break; // GT -> LT
2928 case 0x03: Imm = 0x01; break; // GE -> LE
2929 case 0x04: // EQ
2930 case 0x05: // NE
2931 case 0x06: // FALSE
2932 case 0x07: // TRUE
2933 break;
2934 }
2935
2936 return Imm;
2937 }
2938
2939 /// Get the VCMP immediate if the operands are swapped.
getSwappedVCMPImm(unsigned Imm)2940 unsigned X86::getSwappedVCMPImm(unsigned Imm) {
2941 // Only need the lower 2 bits to distinquish.
2942 switch (Imm & 0x3) {
2943 default: llvm_unreachable("Unreachable!");
2944 case 0x00: case 0x03:
2945 // EQ/NE/TRUE/FALSE/ORD/UNORD don't change immediate when commuted.
2946 break;
2947 case 0x01: case 0x02:
2948 // Need to toggle bits 3:0. Bit 4 stays the same.
2949 Imm ^= 0xf;
2950 break;
2951 }
2952
2953 return Imm;
2954 }
2955
isUnconditionalTailCall(const MachineInstr & MI) const2956 bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
2957 switch (MI.getOpcode()) {
2958 case X86::TCRETURNdi:
2959 case X86::TCRETURNri:
2960 case X86::TCRETURNmi:
2961 case X86::TCRETURNdi64:
2962 case X86::TCRETURNri64:
2963 case X86::TCRETURNmi64:
2964 return true;
2965 default:
2966 return false;
2967 }
2968 }
2969
canMakeTailCallConditional(SmallVectorImpl<MachineOperand> & BranchCond,const MachineInstr & TailCall) const2970 bool X86InstrInfo::canMakeTailCallConditional(
2971 SmallVectorImpl<MachineOperand> &BranchCond,
2972 const MachineInstr &TailCall) const {
2973 if (TailCall.getOpcode() != X86::TCRETURNdi &&
2974 TailCall.getOpcode() != X86::TCRETURNdi64) {
2975 // Only direct calls can be done with a conditional branch.
2976 return false;
2977 }
2978
2979 const MachineFunction *MF = TailCall.getParent()->getParent();
2980 if (Subtarget.isTargetWin64() && MF->hasWinCFI()) {
2981 // Conditional tail calls confuse the Win64 unwinder.
2982 return false;
2983 }
2984
2985 assert(BranchCond.size() == 1);
2986 if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
2987 // Can't make a conditional tail call with this condition.
2988 return false;
2989 }
2990
2991 const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
2992 if (X86FI->getTCReturnAddrDelta() != 0 ||
2993 TailCall.getOperand(1).getImm() != 0) {
2994 // A conditional tail call cannot do any stack adjustment.
2995 return false;
2996 }
2997
2998 return true;
2999 }
3000
replaceBranchWithTailCall(MachineBasicBlock & MBB,SmallVectorImpl<MachineOperand> & BranchCond,const MachineInstr & TailCall) const3001 void X86InstrInfo::replaceBranchWithTailCall(
3002 MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
3003 const MachineInstr &TailCall) const {
3004 assert(canMakeTailCallConditional(BranchCond, TailCall));
3005
3006 MachineBasicBlock::iterator I = MBB.end();
3007 while (I != MBB.begin()) {
3008 --I;
3009 if (I->isDebugInstr())
3010 continue;
3011 if (!I->isBranch())
3012 assert(0 && "Can't find the branch to replace!");
3013
3014 X86::CondCode CC = X86::getCondFromBranch(*I);
3015 assert(BranchCond.size() == 1);
3016 if (CC != BranchCond[0].getImm())
3017 continue;
3018
3019 break;
3020 }
3021
3022 unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
3023 : X86::TCRETURNdi64cc;
3024
3025 auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
3026 MIB->addOperand(TailCall.getOperand(0)); // Destination.
3027 MIB.addImm(0); // Stack offset (not used).
3028 MIB->addOperand(BranchCond[0]); // Condition.
3029 MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
3030
3031 // Add implicit uses and defs of all live regs potentially clobbered by the
3032 // call. This way they still appear live across the call.
3033 LivePhysRegs LiveRegs(getRegisterInfo());
3034 LiveRegs.addLiveOuts(MBB);
3035 SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers;
3036 LiveRegs.stepForward(*MIB, Clobbers);
3037 for (const auto &C : Clobbers) {
3038 MIB.addReg(C.first, RegState::Implicit);
3039 MIB.addReg(C.first, RegState::Implicit | RegState::Define);
3040 }
3041
3042 I->eraseFromParent();
3043 }
3044
3045 // Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
3046 // not be a fallthrough MBB now due to layout changes). Return nullptr if the
3047 // fallthrough MBB cannot be identified.
getFallThroughMBB(MachineBasicBlock * MBB,MachineBasicBlock * TBB)3048 static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
3049 MachineBasicBlock *TBB) {
3050 // Look for non-EHPad successors other than TBB. If we find exactly one, it
3051 // is the fallthrough MBB. If we find zero, then TBB is both the target MBB
3052 // and fallthrough MBB. If we find more than one, we cannot identify the
3053 // fallthrough MBB and should return nullptr.
3054 MachineBasicBlock *FallthroughBB = nullptr;
3055 for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) {
3056 if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB))
3057 continue;
3058 // Return a nullptr if we found more than one fallthrough successor.
3059 if (FallthroughBB && FallthroughBB != TBB)
3060 return nullptr;
3061 FallthroughBB = *SI;
3062 }
3063 return FallthroughBB;
3064 }
3065
AnalyzeBranchImpl(MachineBasicBlock & MBB,MachineBasicBlock * & TBB,MachineBasicBlock * & FBB,SmallVectorImpl<MachineOperand> & Cond,SmallVectorImpl<MachineInstr * > & CondBranches,bool AllowModify) const3066 bool X86InstrInfo::AnalyzeBranchImpl(
3067 MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
3068 SmallVectorImpl<MachineOperand> &Cond,
3069 SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {
3070
3071 // Start from the bottom of the block and work up, examining the
3072 // terminator instructions.
3073 MachineBasicBlock::iterator I = MBB.end();
3074 MachineBasicBlock::iterator UnCondBrIter = MBB.end();
3075 while (I != MBB.begin()) {
3076 --I;
3077 if (I->isDebugInstr())
3078 continue;
3079
3080 // Working from the bottom, when we see a non-terminator instruction, we're
3081 // done.
3082 if (!isUnpredicatedTerminator(*I))
3083 break;
3084
3085 // A terminator that isn't a branch can't easily be handled by this
3086 // analysis.
3087 if (!I->isBranch())
3088 return true;
3089
3090 // Handle unconditional branches.
3091 if (I->getOpcode() == X86::JMP_1) {
3092 UnCondBrIter = I;
3093
3094 if (!AllowModify) {
3095 TBB = I->getOperand(0).getMBB();
3096 continue;
3097 }
3098
3099 // If the block has any instructions after a JMP, delete them.
3100 while (std::next(I) != MBB.end())
3101 std::next(I)->eraseFromParent();
3102
3103 Cond.clear();
3104 FBB = nullptr;
3105
3106 // Delete the JMP if it's equivalent to a fall-through.
3107 if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
3108 TBB = nullptr;
3109 I->eraseFromParent();
3110 I = MBB.end();
3111 UnCondBrIter = MBB.end();
3112 continue;
3113 }
3114
3115 // TBB is used to indicate the unconditional destination.
3116 TBB = I->getOperand(0).getMBB();
3117 continue;
3118 }
3119
3120 // Handle conditional branches.
3121 X86::CondCode BranchCode = X86::getCondFromBranch(*I);
3122 if (BranchCode == X86::COND_INVALID)
3123 return true; // Can't handle indirect branch.
3124
3125 // In practice we should never have an undef eflags operand, if we do
3126 // abort here as we are not prepared to preserve the flag.
3127 if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef())
3128 return true;
3129
3130 // Working from the bottom, handle the first conditional branch.
3131 if (Cond.empty()) {
3132 MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
3133 if (AllowModify && UnCondBrIter != MBB.end() &&
3134 MBB.isLayoutSuccessor(TargetBB)) {
3135 // If we can modify the code and it ends in something like:
3136 //
3137 // jCC L1
3138 // jmp L2
3139 // L1:
3140 // ...
3141 // L2:
3142 //
3143 // Then we can change this to:
3144 //
3145 // jnCC L2
3146 // L1:
3147 // ...
3148 // L2:
3149 //
3150 // Which is a bit more efficient.
3151 // We conditionally jump to the fall-through block.
3152 BranchCode = GetOppositeBranchCondition(BranchCode);
3153 MachineBasicBlock::iterator OldInst = I;
3154
3155 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JCC_1))
3156 .addMBB(UnCondBrIter->getOperand(0).getMBB())
3157 .addImm(BranchCode);
3158 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1))
3159 .addMBB(TargetBB);
3160
3161 OldInst->eraseFromParent();
3162 UnCondBrIter->eraseFromParent();
3163
3164 // Restart the analysis.
3165 UnCondBrIter = MBB.end();
3166 I = MBB.end();
3167 continue;
3168 }
3169
3170 FBB = TBB;
3171 TBB = I->getOperand(0).getMBB();
3172 Cond.push_back(MachineOperand::CreateImm(BranchCode));
3173 CondBranches.push_back(&*I);
3174 continue;
3175 }
3176
3177 // Handle subsequent conditional branches. Only handle the case where all
3178 // conditional branches branch to the same destination and their condition
3179 // opcodes fit one of the special multi-branch idioms.
3180 assert(Cond.size() == 1);
3181 assert(TBB);
3182
3183 // If the conditions are the same, we can leave them alone.
3184 X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
3185 auto NewTBB = I->getOperand(0).getMBB();
3186 if (OldBranchCode == BranchCode && TBB == NewTBB)
3187 continue;
3188
3189 // If they differ, see if they fit one of the known patterns. Theoretically,
3190 // we could handle more patterns here, but we shouldn't expect to see them
3191 // if instruction selection has done a reasonable job.
3192 if (TBB == NewTBB &&
3193 ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
3194 (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
3195 BranchCode = X86::COND_NE_OR_P;
3196 } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
3197 (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
3198 if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
3199 return true;
3200
3201 // X86::COND_E_AND_NP usually has two different branch destinations.
3202 //
3203 // JP B1
3204 // JE B2
3205 // JMP B1
3206 // B1:
3207 // B2:
3208 //
3209 // Here this condition branches to B2 only if NP && E. It has another
3210 // equivalent form:
3211 //
3212 // JNE B1
3213 // JNP B2
3214 // JMP B1
3215 // B1:
3216 // B2:
3217 //
3218 // Similarly it branches to B2 only if E && NP. That is why this condition
3219 // is named with COND_E_AND_NP.
3220 BranchCode = X86::COND_E_AND_NP;
3221 } else
3222 return true;
3223
3224 // Update the MachineOperand.
3225 Cond[0].setImm(BranchCode);
3226 CondBranches.push_back(&*I);
3227 }
3228
3229 return false;
3230 }
3231
analyzeBranch(MachineBasicBlock & MBB,MachineBasicBlock * & TBB,MachineBasicBlock * & FBB,SmallVectorImpl<MachineOperand> & Cond,bool AllowModify) const3232 bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
3233 MachineBasicBlock *&TBB,
3234 MachineBasicBlock *&FBB,
3235 SmallVectorImpl<MachineOperand> &Cond,
3236 bool AllowModify) const {
3237 SmallVector<MachineInstr *, 4> CondBranches;
3238 return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
3239 }
3240
analyzeBranchPredicate(MachineBasicBlock & MBB,MachineBranchPredicate & MBP,bool AllowModify) const3241 bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
3242 MachineBranchPredicate &MBP,
3243 bool AllowModify) const {
3244 using namespace std::placeholders;
3245
3246 SmallVector<MachineOperand, 4> Cond;
3247 SmallVector<MachineInstr *, 4> CondBranches;
3248 if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
3249 AllowModify))
3250 return true;
3251
3252 if (Cond.size() != 1)
3253 return true;
3254
3255 assert(MBP.TrueDest && "expected!");
3256
3257 if (!MBP.FalseDest)
3258 MBP.FalseDest = MBB.getNextNode();
3259
3260 const TargetRegisterInfo *TRI = &getRegisterInfo();
3261
3262 MachineInstr *ConditionDef = nullptr;
3263 bool SingleUseCondition = true;
3264
3265 for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) {
3266 if (I->modifiesRegister(X86::EFLAGS, TRI)) {
3267 ConditionDef = &*I;
3268 break;
3269 }
3270
3271 if (I->readsRegister(X86::EFLAGS, TRI))
3272 SingleUseCondition = false;
3273 }
3274
3275 if (!ConditionDef)
3276 return true;
3277
3278 if (SingleUseCondition) {
3279 for (auto *Succ : MBB.successors())
3280 if (Succ->isLiveIn(X86::EFLAGS))
3281 SingleUseCondition = false;
3282 }
3283
3284 MBP.ConditionDef = ConditionDef;
3285 MBP.SingleUseCondition = SingleUseCondition;
3286
3287 // Currently we only recognize the simple pattern:
3288 //
3289 // test %reg, %reg
3290 // je %label
3291 //
3292 const unsigned TestOpcode =
3293 Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
3294
3295 if (ConditionDef->getOpcode() == TestOpcode &&
3296 ConditionDef->getNumOperands() == 3 &&
3297 ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
3298 (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) {
3299 MBP.LHS = ConditionDef->getOperand(0);
3300 MBP.RHS = MachineOperand::CreateImm(0);
3301 MBP.Predicate = Cond[0].getImm() == X86::COND_NE
3302 ? MachineBranchPredicate::PRED_NE
3303 : MachineBranchPredicate::PRED_EQ;
3304 return false;
3305 }
3306
3307 return true;
3308 }
3309
removeBranch(MachineBasicBlock & MBB,int * BytesRemoved) const3310 unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
3311 int *BytesRemoved) const {
3312 assert(!BytesRemoved && "code size not handled");
3313
3314 MachineBasicBlock::iterator I = MBB.end();
3315 unsigned Count = 0;
3316
3317 while (I != MBB.begin()) {
3318 --I;
3319 if (I->isDebugInstr())
3320 continue;
3321 if (I->getOpcode() != X86::JMP_1 &&
3322 X86::getCondFromBranch(*I) == X86::COND_INVALID)
3323 break;
3324 // Remove the branch.
3325 I->eraseFromParent();
3326 I = MBB.end();
3327 ++Count;
3328 }
3329
3330 return Count;
3331 }
3332
insertBranch(MachineBasicBlock & MBB,MachineBasicBlock * TBB,MachineBasicBlock * FBB,ArrayRef<MachineOperand> Cond,const DebugLoc & DL,int * BytesAdded) const3333 unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
3334 MachineBasicBlock *TBB,
3335 MachineBasicBlock *FBB,
3336 ArrayRef<MachineOperand> Cond,
3337 const DebugLoc &DL,
3338 int *BytesAdded) const {
3339 // Shouldn't be a fall through.
3340 assert(TBB && "insertBranch must not be told to insert a fallthrough");
3341 assert((Cond.size() == 1 || Cond.size() == 0) &&
3342 "X86 branch conditions have one component!");
3343 assert(!BytesAdded && "code size not handled");
3344
3345 if (Cond.empty()) {
3346 // Unconditional branch?
3347 assert(!FBB && "Unconditional branch with multiple successors!");
3348 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
3349 return 1;
3350 }
3351
3352 // If FBB is null, it is implied to be a fall-through block.
3353 bool FallThru = FBB == nullptr;
3354
3355 // Conditional branch.
3356 unsigned Count = 0;
3357 X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
3358 switch (CC) {
3359 case X86::COND_NE_OR_P:
3360 // Synthesize NE_OR_P with two branches.
3361 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE);
3362 ++Count;
3363 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P);
3364 ++Count;
3365 break;
3366 case X86::COND_E_AND_NP:
3367 // Use the next block of MBB as FBB if it is null.
3368 if (FBB == nullptr) {
3369 FBB = getFallThroughMBB(&MBB, TBB);
3370 assert(FBB && "MBB cannot be the last block in function when the false "
3371 "body is a fall-through.");
3372 }
3373 // Synthesize COND_E_AND_NP with two branches.
3374 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE);
3375 ++Count;
3376 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP);
3377 ++Count;
3378 break;
3379 default: {
3380 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC);
3381 ++Count;
3382 }
3383 }
3384 if (!FallThru) {
3385 // Two-way Conditional branch. Insert the second branch.
3386 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
3387 ++Count;
3388 }
3389 return Count;
3390 }
3391
canInsertSelect(const MachineBasicBlock & MBB,ArrayRef<MachineOperand> Cond,Register DstReg,Register TrueReg,Register FalseReg,int & CondCycles,int & TrueCycles,int & FalseCycles) const3392 bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
3393 ArrayRef<MachineOperand> Cond,
3394 Register DstReg, Register TrueReg,
3395 Register FalseReg, int &CondCycles,
3396 int &TrueCycles, int &FalseCycles) const {
3397 // Not all subtargets have cmov instructions.
3398 if (!Subtarget.hasCMov())
3399 return false;
3400 if (Cond.size() != 1)
3401 return false;
3402 // We cannot do the composite conditions, at least not in SSA form.
3403 if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND)
3404 return false;
3405
3406 // Check register classes.
3407 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3408 const TargetRegisterClass *RC =
3409 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
3410 if (!RC)
3411 return false;
3412
3413 // We have cmov instructions for 16, 32, and 64 bit general purpose registers.
3414 if (X86::GR16RegClass.hasSubClassEq(RC) ||
3415 X86::GR32RegClass.hasSubClassEq(RC) ||
3416 X86::GR64RegClass.hasSubClassEq(RC)) {
3417 // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
3418 // Bridge. Probably Ivy Bridge as well.
3419 CondCycles = 2;
3420 TrueCycles = 2;
3421 FalseCycles = 2;
3422 return true;
3423 }
3424
3425 // Can't do vectors.
3426 return false;
3427 }
3428
insertSelect(MachineBasicBlock & MBB,MachineBasicBlock::iterator I,const DebugLoc & DL,Register DstReg,ArrayRef<MachineOperand> Cond,Register TrueReg,Register FalseReg) const3429 void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
3430 MachineBasicBlock::iterator I,
3431 const DebugLoc &DL, Register DstReg,
3432 ArrayRef<MachineOperand> Cond, Register TrueReg,
3433 Register FalseReg) const {
3434 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3435 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
3436 const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
3437 assert(Cond.size() == 1 && "Invalid Cond array");
3438 unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8,
3439 false /*HasMemoryOperand*/);
3440 BuildMI(MBB, I, DL, get(Opc), DstReg)
3441 .addReg(FalseReg)
3442 .addReg(TrueReg)
3443 .addImm(Cond[0].getImm());
3444 }
3445
3446 /// Test if the given register is a physical h register.
isHReg(unsigned Reg)3447 static bool isHReg(unsigned Reg) {
3448 return X86::GR8_ABCD_HRegClass.contains(Reg);
3449 }
3450
3451 // Try and copy between VR128/VR64 and GR64 registers.
CopyToFromAsymmetricReg(unsigned DestReg,unsigned SrcReg,const X86Subtarget & Subtarget)3452 static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
3453 const X86Subtarget &Subtarget) {
3454 bool HasAVX = Subtarget.hasAVX();
3455 bool HasAVX512 = Subtarget.hasAVX512();
3456
3457 // SrcReg(MaskReg) -> DestReg(GR64)
3458 // SrcReg(MaskReg) -> DestReg(GR32)
3459
3460 // All KMASK RegClasses hold the same k registers, can be tested against anyone.
3461 if (X86::VK16RegClass.contains(SrcReg)) {
3462 if (X86::GR64RegClass.contains(DestReg)) {
3463 assert(Subtarget.hasBWI());
3464 return X86::KMOVQrk;
3465 }
3466 if (X86::GR32RegClass.contains(DestReg))
3467 return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk;
3468 }
3469
3470 // SrcReg(GR64) -> DestReg(MaskReg)
3471 // SrcReg(GR32) -> DestReg(MaskReg)
3472
3473 // All KMASK RegClasses hold the same k registers, can be tested against anyone.
3474 if (X86::VK16RegClass.contains(DestReg)) {
3475 if (X86::GR64RegClass.contains(SrcReg)) {
3476 assert(Subtarget.hasBWI());
3477 return X86::KMOVQkr;
3478 }
3479 if (X86::GR32RegClass.contains(SrcReg))
3480 return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr;
3481 }
3482
3483
3484 // SrcReg(VR128) -> DestReg(GR64)
3485 // SrcReg(VR64) -> DestReg(GR64)
3486 // SrcReg(GR64) -> DestReg(VR128)
3487 // SrcReg(GR64) -> DestReg(VR64)
3488
3489 if (X86::GR64RegClass.contains(DestReg)) {
3490 if (X86::VR128XRegClass.contains(SrcReg))
3491 // Copy from a VR128 register to a GR64 register.
3492 return HasAVX512 ? X86::VMOVPQIto64Zrr :
3493 HasAVX ? X86::VMOVPQIto64rr :
3494 X86::MOVPQIto64rr;
3495 if (X86::VR64RegClass.contains(SrcReg))
3496 // Copy from a VR64 register to a GR64 register.
3497 return X86::MMX_MOVD64from64rr;
3498 } else if (X86::GR64RegClass.contains(SrcReg)) {
3499 // Copy from a GR64 register to a VR128 register.
3500 if (X86::VR128XRegClass.contains(DestReg))
3501 return HasAVX512 ? X86::VMOV64toPQIZrr :
3502 HasAVX ? X86::VMOV64toPQIrr :
3503 X86::MOV64toPQIrr;
3504 // Copy from a GR64 register to a VR64 register.
3505 if (X86::VR64RegClass.contains(DestReg))
3506 return X86::MMX_MOVD64to64rr;
3507 }
3508
3509 // SrcReg(VR128) -> DestReg(GR32)
3510 // SrcReg(GR32) -> DestReg(VR128)
3511
3512 if (X86::GR32RegClass.contains(DestReg) &&
3513 X86::VR128XRegClass.contains(SrcReg))
3514 // Copy from a VR128 register to a GR32 register.
3515 return HasAVX512 ? X86::VMOVPDI2DIZrr :
3516 HasAVX ? X86::VMOVPDI2DIrr :
3517 X86::MOVPDI2DIrr;
3518
3519 if (X86::VR128XRegClass.contains(DestReg) &&
3520 X86::GR32RegClass.contains(SrcReg))
3521 // Copy from a VR128 register to a VR128 register.
3522 return HasAVX512 ? X86::VMOVDI2PDIZrr :
3523 HasAVX ? X86::VMOVDI2PDIrr :
3524 X86::MOVDI2PDIrr;
3525 return 0;
3526 }
3527
copyPhysReg(MachineBasicBlock & MBB,MachineBasicBlock::iterator MI,const DebugLoc & DL,MCRegister DestReg,MCRegister SrcReg,bool KillSrc) const3528 void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
3529 MachineBasicBlock::iterator MI,
3530 const DebugLoc &DL, MCRegister DestReg,
3531 MCRegister SrcReg, bool KillSrc) const {
3532 // First deal with the normal symmetric copies.
3533 bool HasAVX = Subtarget.hasAVX();
3534 bool HasVLX = Subtarget.hasVLX();
3535 unsigned Opc = 0;
3536 if (X86::GR64RegClass.contains(DestReg, SrcReg))
3537 Opc = X86::MOV64rr;
3538 else if (X86::GR32RegClass.contains(DestReg, SrcReg))
3539 Opc = X86::MOV32rr;
3540 else if (X86::GR16RegClass.contains(DestReg, SrcReg))
3541 Opc = X86::MOV16rr;
3542 else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
3543 // Copying to or from a physical H register on x86-64 requires a NOREX
3544 // move. Otherwise use a normal move.
3545 if ((isHReg(DestReg) || isHReg(SrcReg)) &&
3546 Subtarget.is64Bit()) {
3547 Opc = X86::MOV8rr_NOREX;
3548 // Both operands must be encodable without an REX prefix.
3549 assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
3550 "8-bit H register can not be copied outside GR8_NOREX");
3551 } else
3552 Opc = X86::MOV8rr;
3553 }
3554 else if (X86::VR64RegClass.contains(DestReg, SrcReg))
3555 Opc = X86::MMX_MOVQ64rr;
3556 else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
3557 if (HasVLX)
3558 Opc = X86::VMOVAPSZ128rr;
3559 else if (X86::VR128RegClass.contains(DestReg, SrcReg))
3560 Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
3561 else {
3562 // If this an extended register and we don't have VLX we need to use a
3563 // 512-bit move.
3564 Opc = X86::VMOVAPSZrr;
3565 const TargetRegisterInfo *TRI = &getRegisterInfo();
3566 DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm,
3567 &X86::VR512RegClass);
3568 SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm,
3569 &X86::VR512RegClass);
3570 }
3571 } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
3572 if (HasVLX)
3573 Opc = X86::VMOVAPSZ256rr;
3574 else if (X86::VR256RegClass.contains(DestReg, SrcReg))
3575 Opc = X86::VMOVAPSYrr;
3576 else {
3577 // If this an extended register and we don't have VLX we need to use a
3578 // 512-bit move.
3579 Opc = X86::VMOVAPSZrr;
3580 const TargetRegisterInfo *TRI = &getRegisterInfo();
3581 DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm,
3582 &X86::VR512RegClass);
3583 SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm,
3584 &X86::VR512RegClass);
3585 }
3586 } else if (X86::VR512RegClass.contains(DestReg, SrcReg))
3587 Opc = X86::VMOVAPSZrr;
3588 // All KMASK RegClasses hold the same k registers, can be tested against anyone.
3589 else if (X86::VK16RegClass.contains(DestReg, SrcReg))
3590 Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk;
3591 if (!Opc)
3592 Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
3593
3594 if (Opc) {
3595 BuildMI(MBB, MI, DL, get(Opc), DestReg)
3596 .addReg(SrcReg, getKillRegState(KillSrc));
3597 return;
3598 }
3599
3600 if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
3601 // FIXME: We use a fatal error here because historically LLVM has tried
3602 // lower some of these physreg copies and we want to ensure we get
3603 // reasonable bug reports if someone encounters a case no other testing
3604 // found. This path should be removed after the LLVM 7 release.
3605 report_fatal_error("Unable to copy EFLAGS physical register!");
3606 }
3607
3608 LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
3609 << RI.getName(DestReg) << '\n');
3610 report_fatal_error("Cannot emit physreg copy instruction");
3611 }
3612
3613 Optional<DestSourcePair>
isCopyInstrImpl(const MachineInstr & MI) const3614 X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
3615 if (MI.isMoveReg())
3616 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
3617 return None;
3618 }
3619
getLoadStoreRegOpcode(Register Reg,const TargetRegisterClass * RC,bool IsStackAligned,const X86Subtarget & STI,bool load)3620 static unsigned getLoadStoreRegOpcode(Register Reg,
3621 const TargetRegisterClass *RC,
3622 bool IsStackAligned,
3623 const X86Subtarget &STI, bool load) {
3624 bool HasAVX = STI.hasAVX();
3625 bool HasAVX512 = STI.hasAVX512();
3626 bool HasVLX = STI.hasVLX();
3627
3628 switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
3629 default:
3630 llvm_unreachable("Unknown spill size");
3631 case 1:
3632 assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
3633 if (STI.is64Bit())
3634 // Copying to or from a physical H register on x86-64 requires a NOREX
3635 // move. Otherwise use a normal move.
3636 if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
3637 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
3638 return load ? X86::MOV8rm : X86::MOV8mr;
3639 case 2:
3640 if (X86::VK16RegClass.hasSubClassEq(RC))
3641 return load ? X86::KMOVWkm : X86::KMOVWmk;
3642 if (X86::FR16XRegClass.hasSubClassEq(RC)) {
3643 assert(STI.hasFP16());
3644 return load ? X86::VMOVSHZrm_alt : X86::VMOVSHZmr;
3645 }
3646 assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
3647 return load ? X86::MOV16rm : X86::MOV16mr;
3648 case 4:
3649 if (X86::GR32RegClass.hasSubClassEq(RC))
3650 return load ? X86::MOV32rm : X86::MOV32mr;
3651 if (X86::FR32XRegClass.hasSubClassEq(RC))
3652 return load ?
3653 (HasAVX512 ? X86::VMOVSSZrm_alt :
3654 HasAVX ? X86::VMOVSSrm_alt :
3655 X86::MOVSSrm_alt) :
3656 (HasAVX512 ? X86::VMOVSSZmr :
3657 HasAVX ? X86::VMOVSSmr :
3658 X86::MOVSSmr);
3659 if (X86::RFP32RegClass.hasSubClassEq(RC))
3660 return load ? X86::LD_Fp32m : X86::ST_Fp32m;
3661 if (X86::VK32RegClass.hasSubClassEq(RC)) {
3662 assert(STI.hasBWI() && "KMOVD requires BWI");
3663 return load ? X86::KMOVDkm : X86::KMOVDmk;
3664 }
3665 // All of these mask pair classes have the same spill size, the same kind
3666 // of kmov instructions can be used with all of them.
3667 if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
3668 X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
3669 X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
3670 X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
3671 X86::VK16PAIRRegClass.hasSubClassEq(RC))
3672 return load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
3673 llvm_unreachable("Unknown 4-byte regclass");
3674 case 8:
3675 if (X86::GR64RegClass.hasSubClassEq(RC))
3676 return load ? X86::MOV64rm : X86::MOV64mr;
3677 if (X86::FR64XRegClass.hasSubClassEq(RC))
3678 return load ?
3679 (HasAVX512 ? X86::VMOVSDZrm_alt :
3680 HasAVX ? X86::VMOVSDrm_alt :
3681 X86::MOVSDrm_alt) :
3682 (HasAVX512 ? X86::VMOVSDZmr :
3683 HasAVX ? X86::VMOVSDmr :
3684 X86::MOVSDmr);
3685 if (X86::VR64RegClass.hasSubClassEq(RC))
3686 return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
3687 if (X86::RFP64RegClass.hasSubClassEq(RC))
3688 return load ? X86::LD_Fp64m : X86::ST_Fp64m;
3689 if (X86::VK64RegClass.hasSubClassEq(RC)) {
3690 assert(STI.hasBWI() && "KMOVQ requires BWI");
3691 return load ? X86::KMOVQkm : X86::KMOVQmk;
3692 }
3693 llvm_unreachable("Unknown 8-byte regclass");
3694 case 10:
3695 assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
3696 return load ? X86::LD_Fp80m : X86::ST_FpP80m;
3697 case 16: {
3698 if (X86::VR128XRegClass.hasSubClassEq(RC)) {
3699 // If stack is realigned we can use aligned stores.
3700 if (IsStackAligned)
3701 return load ?
3702 (HasVLX ? X86::VMOVAPSZ128rm :
3703 HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX :
3704 HasAVX ? X86::VMOVAPSrm :
3705 X86::MOVAPSrm):
3706 (HasVLX ? X86::VMOVAPSZ128mr :
3707 HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX :
3708 HasAVX ? X86::VMOVAPSmr :
3709 X86::MOVAPSmr);
3710 else
3711 return load ?
3712 (HasVLX ? X86::VMOVUPSZ128rm :
3713 HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX :
3714 HasAVX ? X86::VMOVUPSrm :
3715 X86::MOVUPSrm):
3716 (HasVLX ? X86::VMOVUPSZ128mr :
3717 HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX :
3718 HasAVX ? X86::VMOVUPSmr :
3719 X86::MOVUPSmr);
3720 }
3721 if (X86::BNDRRegClass.hasSubClassEq(RC)) {
3722 if (STI.is64Bit())
3723 return load ? X86::BNDMOV64rm : X86::BNDMOV64mr;
3724 else
3725 return load ? X86::BNDMOV32rm : X86::BNDMOV32mr;
3726 }
3727 llvm_unreachable("Unknown 16-byte regclass");
3728 }
3729 case 32:
3730 assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
3731 // If stack is realigned we can use aligned stores.
3732 if (IsStackAligned)
3733 return load ?
3734 (HasVLX ? X86::VMOVAPSZ256rm :
3735 HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX :
3736 X86::VMOVAPSYrm) :
3737 (HasVLX ? X86::VMOVAPSZ256mr :
3738 HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX :
3739 X86::VMOVAPSYmr);
3740 else
3741 return load ?
3742 (HasVLX ? X86::VMOVUPSZ256rm :
3743 HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX :
3744 X86::VMOVUPSYrm) :
3745 (HasVLX ? X86::VMOVUPSZ256mr :
3746 HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX :
3747 X86::VMOVUPSYmr);
3748 case 64:
3749 assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
3750 assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
3751 if (IsStackAligned)
3752 return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
3753 else
3754 return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
3755 }
3756 }
3757
3758 Optional<ExtAddrMode>
getAddrModeFromMemoryOp(const MachineInstr & MemI,const TargetRegisterInfo * TRI) const3759 X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
3760 const TargetRegisterInfo *TRI) const {
3761 const MCInstrDesc &Desc = MemI.getDesc();
3762 int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
3763 if (MemRefBegin < 0)
3764 return None;
3765
3766 MemRefBegin += X86II::getOperandBias(Desc);
3767
3768 auto &BaseOp = MemI.getOperand(MemRefBegin + X86::AddrBaseReg);
3769 if (!BaseOp.isReg()) // Can be an MO_FrameIndex
3770 return None;
3771
3772 const MachineOperand &DispMO = MemI.getOperand(MemRefBegin + X86::AddrDisp);
3773 // Displacement can be symbolic
3774 if (!DispMO.isImm())
3775 return None;
3776
3777 ExtAddrMode AM;
3778 AM.BaseReg = BaseOp.getReg();
3779 AM.ScaledReg = MemI.getOperand(MemRefBegin + X86::AddrIndexReg).getReg();
3780 AM.Scale = MemI.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm();
3781 AM.Displacement = DispMO.getImm();
3782 return AM;
3783 }
3784
getConstValDefinedInReg(const MachineInstr & MI,const Register Reg,int64_t & ImmVal) const3785 bool X86InstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
3786 const Register Reg,
3787 int64_t &ImmVal) const {
3788 if (MI.getOpcode() != X86::MOV32ri && MI.getOpcode() != X86::MOV64ri)
3789 return false;
3790 // Mov Src can be a global address.
3791 if (!MI.getOperand(1).isImm() || MI.getOperand(0).getReg() != Reg)
3792 return false;
3793 ImmVal = MI.getOperand(1).getImm();
3794 return true;
3795 }
3796
preservesZeroValueInReg(const MachineInstr * MI,const Register NullValueReg,const TargetRegisterInfo * TRI) const3797 bool X86InstrInfo::preservesZeroValueInReg(
3798 const MachineInstr *MI, const Register NullValueReg,
3799 const TargetRegisterInfo *TRI) const {
3800 if (!MI->modifiesRegister(NullValueReg, TRI))
3801 return true;
3802 switch (MI->getOpcode()) {
3803 // Shift right/left of a null unto itself is still a null, i.e. rax = shl rax
3804 // X.
3805 case X86::SHR64ri:
3806 case X86::SHR32ri:
3807 case X86::SHL64ri:
3808 case X86::SHL32ri:
3809 assert(MI->getOperand(0).isDef() && MI->getOperand(1).isUse() &&
3810 "expected for shift opcode!");
3811 return MI->getOperand(0).getReg() == NullValueReg &&
3812 MI->getOperand(1).getReg() == NullValueReg;
3813 // Zero extend of a sub-reg of NullValueReg into itself does not change the
3814 // null value.
3815 case X86::MOV32rr:
3816 return llvm::all_of(MI->operands(), [&](const MachineOperand &MO) {
3817 return TRI->isSubRegisterEq(NullValueReg, MO.getReg());
3818 });
3819 default:
3820 return false;
3821 }
3822 llvm_unreachable("Should be handled above!");
3823 }
3824
getMemOperandsWithOffsetWidth(const MachineInstr & MemOp,SmallVectorImpl<const MachineOperand * > & BaseOps,int64_t & Offset,bool & OffsetIsScalable,unsigned & Width,const TargetRegisterInfo * TRI) const3825 bool X86InstrInfo::getMemOperandsWithOffsetWidth(
3826 const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
3827 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
3828 const TargetRegisterInfo *TRI) const {
3829 const MCInstrDesc &Desc = MemOp.getDesc();
3830 int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
3831 if (MemRefBegin < 0)
3832 return false;
3833
3834 MemRefBegin += X86II::getOperandBias(Desc);
3835
3836 const MachineOperand *BaseOp =
3837 &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
3838 if (!BaseOp->isReg()) // Can be an MO_FrameIndex
3839 return false;
3840
3841 if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
3842 return false;
3843
3844 if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
3845 X86::NoRegister)
3846 return false;
3847
3848 const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
3849
3850 // Displacement can be symbolic
3851 if (!DispMO.isImm())
3852 return false;
3853
3854 Offset = DispMO.getImm();
3855
3856 if (!BaseOp->isReg())
3857 return false;
3858
3859 OffsetIsScalable = false;
3860 // FIXME: Relying on memoperands() may not be right thing to do here. Check
3861 // with X86 maintainers, and fix it accordingly. For now, it is ok, since
3862 // there is no use of `Width` for X86 back-end at the moment.
3863 Width =
3864 !MemOp.memoperands_empty() ? MemOp.memoperands().front()->getSize() : 0;
3865 BaseOps.push_back(BaseOp);
3866 return true;
3867 }
3868
getStoreRegOpcode(Register SrcReg,const TargetRegisterClass * RC,bool IsStackAligned,const X86Subtarget & STI)3869 static unsigned getStoreRegOpcode(Register SrcReg,
3870 const TargetRegisterClass *RC,
3871 bool IsStackAligned,
3872 const X86Subtarget &STI) {
3873 return getLoadStoreRegOpcode(SrcReg, RC, IsStackAligned, STI, false);
3874 }
3875
getLoadRegOpcode(Register DestReg,const TargetRegisterClass * RC,bool IsStackAligned,const X86Subtarget & STI)3876 static unsigned getLoadRegOpcode(Register DestReg,
3877 const TargetRegisterClass *RC,
3878 bool IsStackAligned, const X86Subtarget &STI) {
3879 return getLoadStoreRegOpcode(DestReg, RC, IsStackAligned, STI, true);
3880 }
3881
storeRegToStackSlot(MachineBasicBlock & MBB,MachineBasicBlock::iterator MI,Register SrcReg,bool isKill,int FrameIdx,const TargetRegisterClass * RC,const TargetRegisterInfo * TRI) const3882 void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
3883 MachineBasicBlock::iterator MI,
3884 Register SrcReg, bool isKill, int FrameIdx,
3885 const TargetRegisterClass *RC,
3886 const TargetRegisterInfo *TRI) const {
3887 const MachineFunction &MF = *MBB.getParent();
3888 const MachineFrameInfo &MFI = MF.getFrameInfo();
3889 assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
3890 "Stack slot too small for store");
3891 if (RC->getID() == X86::TILERegClassID) {
3892 unsigned Opc = X86::TILESTORED;
3893 // tilestored %tmm, (%sp, %idx)
3894 MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
3895 Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
3896 BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64);
3897 MachineInstr *NewMI =
3898 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
3899 .addReg(SrcReg, getKillRegState(isKill));
3900 MachineOperand &MO = NewMI->getOperand(2);
3901 MO.setReg(VirtReg);
3902 MO.setIsKill(true);
3903 } else {
3904 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
3905 bool isAligned =
3906 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
3907 (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
3908 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
3909 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
3910 .addReg(SrcReg, getKillRegState(isKill));
3911 }
3912 }
3913
loadRegFromStackSlot(MachineBasicBlock & MBB,MachineBasicBlock::iterator MI,Register DestReg,int FrameIdx,const TargetRegisterClass * RC,const TargetRegisterInfo * TRI) const3914 void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
3915 MachineBasicBlock::iterator MI,
3916 Register DestReg, int FrameIdx,
3917 const TargetRegisterClass *RC,
3918 const TargetRegisterInfo *TRI) const {
3919 if (RC->getID() == X86::TILERegClassID) {
3920 unsigned Opc = X86::TILELOADD;
3921 // tileloadd (%sp, %idx), %tmm
3922 MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
3923 Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
3924 MachineInstr *NewMI =
3925 BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64);
3926 NewMI = addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg),
3927 FrameIdx);
3928 MachineOperand &MO = NewMI->getOperand(3);
3929 MO.setReg(VirtReg);
3930 MO.setIsKill(true);
3931 } else {
3932 const MachineFunction &MF = *MBB.getParent();
3933 const MachineFrameInfo &MFI = MF.getFrameInfo();
3934 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
3935 bool isAligned =
3936 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
3937 (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
3938 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
3939 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg),
3940 FrameIdx);
3941 }
3942 }
3943
analyzeCompare(const MachineInstr & MI,Register & SrcReg,Register & SrcReg2,int64_t & CmpMask,int64_t & CmpValue) const3944 bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
3945 Register &SrcReg2, int64_t &CmpMask,
3946 int64_t &CmpValue) const {
3947 switch (MI.getOpcode()) {
3948 default: break;
3949 case X86::CMP64ri32:
3950 case X86::CMP64ri8:
3951 case X86::CMP32ri:
3952 case X86::CMP32ri8:
3953 case X86::CMP16ri:
3954 case X86::CMP16ri8:
3955 case X86::CMP8ri:
3956 SrcReg = MI.getOperand(0).getReg();
3957 SrcReg2 = 0;
3958 if (MI.getOperand(1).isImm()) {
3959 CmpMask = ~0;
3960 CmpValue = MI.getOperand(1).getImm();
3961 } else {
3962 CmpMask = CmpValue = 0;
3963 }
3964 return true;
3965 // A SUB can be used to perform comparison.
3966 case X86::SUB64rm:
3967 case X86::SUB32rm:
3968 case X86::SUB16rm:
3969 case X86::SUB8rm:
3970 SrcReg = MI.getOperand(1).getReg();
3971 SrcReg2 = 0;
3972 CmpMask = 0;
3973 CmpValue = 0;
3974 return true;
3975 case X86::SUB64rr:
3976 case X86::SUB32rr:
3977 case X86::SUB16rr:
3978 case X86::SUB8rr:
3979 SrcReg = MI.getOperand(1).getReg();
3980 SrcReg2 = MI.getOperand(2).getReg();
3981 CmpMask = 0;
3982 CmpValue = 0;
3983 return true;
3984 case X86::SUB64ri32:
3985 case X86::SUB64ri8:
3986 case X86::SUB32ri:
3987 case X86::SUB32ri8:
3988 case X86::SUB16ri:
3989 case X86::SUB16ri8:
3990 case X86::SUB8ri:
3991 SrcReg = MI.getOperand(1).getReg();
3992 SrcReg2 = 0;
3993 if (MI.getOperand(2).isImm()) {
3994 CmpMask = ~0;
3995 CmpValue = MI.getOperand(2).getImm();
3996 } else {
3997 CmpMask = CmpValue = 0;
3998 }
3999 return true;
4000 case X86::CMP64rr:
4001 case X86::CMP32rr:
4002 case X86::CMP16rr:
4003 case X86::CMP8rr:
4004 SrcReg = MI.getOperand(0).getReg();
4005 SrcReg2 = MI.getOperand(1).getReg();
4006 CmpMask = 0;
4007 CmpValue = 0;
4008 return true;
4009 case X86::TEST8rr:
4010 case X86::TEST16rr:
4011 case X86::TEST32rr:
4012 case X86::TEST64rr:
4013 SrcReg = MI.getOperand(0).getReg();
4014 if (MI.getOperand(1).getReg() != SrcReg)
4015 return false;
4016 // Compare against zero.
4017 SrcReg2 = 0;
4018 CmpMask = ~0;
4019 CmpValue = 0;
4020 return true;
4021 }
4022 return false;
4023 }
4024
4025 /// Check whether the first instruction, whose only
4026 /// purpose is to update flags, can be made redundant.
4027 /// CMPrr can be made redundant by SUBrr if the operands are the same.
4028 /// This function can be extended later on.
4029 /// SrcReg, SrcRegs: register operands for FlagI.
4030 /// ImmValue: immediate for FlagI if it takes an immediate.
isRedundantFlagInstr(const MachineInstr & FlagI,Register SrcReg,Register SrcReg2,int64_t ImmMask,int64_t ImmValue,const MachineInstr & OI)4031 inline static bool isRedundantFlagInstr(const MachineInstr &FlagI,
4032 Register SrcReg, Register SrcReg2,
4033 int64_t ImmMask, int64_t ImmValue,
4034 const MachineInstr &OI) {
4035 if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
4036 (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
4037 (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
4038 (FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) &&
4039 ((OI.getOperand(1).getReg() == SrcReg &&
4040 OI.getOperand(2).getReg() == SrcReg2) ||
4041 (OI.getOperand(1).getReg() == SrcReg2 &&
4042 OI.getOperand(2).getReg() == SrcReg)))
4043 return true;
4044
4045 if (ImmMask != 0 &&
4046 ((FlagI.getOpcode() == X86::CMP64ri32 &&
4047 OI.getOpcode() == X86::SUB64ri32) ||
4048 (FlagI.getOpcode() == X86::CMP64ri8 &&
4049 OI.getOpcode() == X86::SUB64ri8) ||
4050 (FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) ||
4051 (FlagI.getOpcode() == X86::CMP32ri8 &&
4052 OI.getOpcode() == X86::SUB32ri8) ||
4053 (FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) ||
4054 (FlagI.getOpcode() == X86::CMP16ri8 &&
4055 OI.getOpcode() == X86::SUB16ri8) ||
4056 (FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) &&
4057 OI.getOperand(1).getReg() == SrcReg &&
4058 OI.getOperand(2).getImm() == ImmValue)
4059 return true;
4060 return false;
4061 }
4062
4063 /// Check whether the definition can be converted
4064 /// to remove a comparison against zero.
isDefConvertible(const MachineInstr & MI,bool & NoSignFlag,bool & ClearsOverflowFlag)4065 inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
4066 bool &ClearsOverflowFlag) {
4067 NoSignFlag = false;
4068 ClearsOverflowFlag = false;
4069
4070 switch (MI.getOpcode()) {
4071 default: return false;
4072
4073 // The shift instructions only modify ZF if their shift count is non-zero.
4074 // N.B.: The processor truncates the shift count depending on the encoding.
4075 case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri:
4076 case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri:
4077 return getTruncatedShiftCount(MI, 2) != 0;
4078
4079 // Some left shift instructions can be turned into LEA instructions but only
4080 // if their flags aren't used. Avoid transforming such instructions.
4081 case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{
4082 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
4083 if (isTruncatedShiftCountForLEA(ShAmt)) return false;
4084 return ShAmt != 0;
4085 }
4086
4087 case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8:
4088 case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8:
4089 return getTruncatedShiftCount(MI, 3) != 0;
4090
4091 case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
4092 case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8:
4093 case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr:
4094 case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm:
4095 case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm:
4096 case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r:
4097 case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
4098 case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8:
4099 case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr:
4100 case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm:
4101 case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm:
4102 case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r:
4103 case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri:
4104 case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8:
4105 case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr:
4106 case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm:
4107 case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm:
4108 case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri:
4109 case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8:
4110 case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr:
4111 case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm:
4112 case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm:
4113 case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
4114 case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1:
4115 case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1:
4116 case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1:
4117 case X86::LZCNT16rr: case X86::LZCNT16rm:
4118 case X86::LZCNT32rr: case X86::LZCNT32rm:
4119 case X86::LZCNT64rr: case X86::LZCNT64rm:
4120 case X86::POPCNT16rr:case X86::POPCNT16rm:
4121 case X86::POPCNT32rr:case X86::POPCNT32rm:
4122 case X86::POPCNT64rr:case X86::POPCNT64rm:
4123 case X86::TZCNT16rr: case X86::TZCNT16rm:
4124 case X86::TZCNT32rr: case X86::TZCNT32rm:
4125 case X86::TZCNT64rr: case X86::TZCNT64rm:
4126 return true;
4127 case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
4128 case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8:
4129 case X86::AND8ri: case X86::AND64rr: case X86::AND32rr:
4130 case X86::AND16rr: case X86::AND8rr: case X86::AND64rm:
4131 case X86::AND32rm: case X86::AND16rm: case X86::AND8rm:
4132 case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri:
4133 case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8:
4134 case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr:
4135 case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm:
4136 case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm:
4137 case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri:
4138 case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8:
4139 case X86::OR8ri: case X86::OR64rr: case X86::OR32rr:
4140 case X86::OR16rr: case X86::OR8rr: case X86::OR64rm:
4141 case X86::OR32rm: case X86::OR16rm: case X86::OR8rm:
4142 case X86::ANDN32rr: case X86::ANDN32rm:
4143 case X86::ANDN64rr: case X86::ANDN64rm:
4144 case X86::BLSI32rr: case X86::BLSI32rm:
4145 case X86::BLSI64rr: case X86::BLSI64rm:
4146 case X86::BLSMSK32rr: case X86::BLSMSK32rm:
4147 case X86::BLSMSK64rr: case X86::BLSMSK64rm:
4148 case X86::BLSR32rr: case X86::BLSR32rm:
4149 case X86::BLSR64rr: case X86::BLSR64rm:
4150 case X86::BLCFILL32rr: case X86::BLCFILL32rm:
4151 case X86::BLCFILL64rr: case X86::BLCFILL64rm:
4152 case X86::BLCI32rr: case X86::BLCI32rm:
4153 case X86::BLCI64rr: case X86::BLCI64rm:
4154 case X86::BLCIC32rr: case X86::BLCIC32rm:
4155 case X86::BLCIC64rr: case X86::BLCIC64rm:
4156 case X86::BLCMSK32rr: case X86::BLCMSK32rm:
4157 case X86::BLCMSK64rr: case X86::BLCMSK64rm:
4158 case X86::BLCS32rr: case X86::BLCS32rm:
4159 case X86::BLCS64rr: case X86::BLCS64rm:
4160 case X86::BLSFILL32rr: case X86::BLSFILL32rm:
4161 case X86::BLSFILL64rr: case X86::BLSFILL64rm:
4162 case X86::BLSIC32rr: case X86::BLSIC32rm:
4163 case X86::BLSIC64rr: case X86::BLSIC64rm:
4164 case X86::BZHI32rr: case X86::BZHI32rm:
4165 case X86::BZHI64rr: case X86::BZHI64rm:
4166 case X86::T1MSKC32rr: case X86::T1MSKC32rm:
4167 case X86::T1MSKC64rr: case X86::T1MSKC64rm:
4168 case X86::TZMSK32rr: case X86::TZMSK32rm:
4169 case X86::TZMSK64rr: case X86::TZMSK64rm:
4170 // These instructions clear the overflow flag just like TEST.
4171 // FIXME: These are not the only instructions in this switch that clear the
4172 // overflow flag.
4173 ClearsOverflowFlag = true;
4174 return true;
4175 case X86::BEXTR32rr: case X86::BEXTR64rr:
4176 case X86::BEXTR32rm: case X86::BEXTR64rm:
4177 case X86::BEXTRI32ri: case X86::BEXTRI32mi:
4178 case X86::BEXTRI64ri: case X86::BEXTRI64mi:
4179 // BEXTR doesn't update the sign flag so we can't use it. It does clear
4180 // the overflow flag, but that's not useful without the sign flag.
4181 NoSignFlag = true;
4182 return true;
4183 }
4184 }
4185
4186 /// Check whether the use can be converted to remove a comparison against zero.
isUseDefConvertible(const MachineInstr & MI)4187 static X86::CondCode isUseDefConvertible(const MachineInstr &MI) {
4188 switch (MI.getOpcode()) {
4189 default: return X86::COND_INVALID;
4190 case X86::NEG8r:
4191 case X86::NEG16r:
4192 case X86::NEG32r:
4193 case X86::NEG64r:
4194 return X86::COND_AE;
4195 case X86::LZCNT16rr:
4196 case X86::LZCNT32rr:
4197 case X86::LZCNT64rr:
4198 return X86::COND_B;
4199 case X86::POPCNT16rr:
4200 case X86::POPCNT32rr:
4201 case X86::POPCNT64rr:
4202 return X86::COND_E;
4203 case X86::TZCNT16rr:
4204 case X86::TZCNT32rr:
4205 case X86::TZCNT64rr:
4206 return X86::COND_B;
4207 case X86::BSF16rr:
4208 case X86::BSF32rr:
4209 case X86::BSF64rr:
4210 case X86::BSR16rr:
4211 case X86::BSR32rr:
4212 case X86::BSR64rr:
4213 return X86::COND_E;
4214 case X86::BLSI32rr:
4215 case X86::BLSI64rr:
4216 return X86::COND_AE;
4217 case X86::BLSR32rr:
4218 case X86::BLSR64rr:
4219 case X86::BLSMSK32rr:
4220 case X86::BLSMSK64rr:
4221 return X86::COND_B;
4222 // TODO: TBM instructions.
4223 }
4224 }
4225
4226 /// Check if there exists an earlier instruction that
4227 /// operates on the same source operands and sets flags in the same way as
4228 /// Compare; remove Compare if possible.
optimizeCompareInstr(MachineInstr & CmpInstr,Register SrcReg,Register SrcReg2,int64_t CmpMask,int64_t CmpValue,const MachineRegisterInfo * MRI) const4229 bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
4230 Register SrcReg2, int64_t CmpMask,
4231 int64_t CmpValue,
4232 const MachineRegisterInfo *MRI) const {
4233 // Check whether we can replace SUB with CMP.
4234 switch (CmpInstr.getOpcode()) {
4235 default: break;
4236 case X86::SUB64ri32:
4237 case X86::SUB64ri8:
4238 case X86::SUB32ri:
4239 case X86::SUB32ri8:
4240 case X86::SUB16ri:
4241 case X86::SUB16ri8:
4242 case X86::SUB8ri:
4243 case X86::SUB64rm:
4244 case X86::SUB32rm:
4245 case X86::SUB16rm:
4246 case X86::SUB8rm:
4247 case X86::SUB64rr:
4248 case X86::SUB32rr:
4249 case X86::SUB16rr:
4250 case X86::SUB8rr: {
4251 if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
4252 return false;
4253 // There is no use of the destination register, we can replace SUB with CMP.
4254 unsigned NewOpcode = 0;
4255 switch (CmpInstr.getOpcode()) {
4256 default: llvm_unreachable("Unreachable!");
4257 case X86::SUB64rm: NewOpcode = X86::CMP64rm; break;
4258 case X86::SUB32rm: NewOpcode = X86::CMP32rm; break;
4259 case X86::SUB16rm: NewOpcode = X86::CMP16rm; break;
4260 case X86::SUB8rm: NewOpcode = X86::CMP8rm; break;
4261 case X86::SUB64rr: NewOpcode = X86::CMP64rr; break;
4262 case X86::SUB32rr: NewOpcode = X86::CMP32rr; break;
4263 case X86::SUB16rr: NewOpcode = X86::CMP16rr; break;
4264 case X86::SUB8rr: NewOpcode = X86::CMP8rr; break;
4265 case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
4266 case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break;
4267 case X86::SUB32ri: NewOpcode = X86::CMP32ri; break;
4268 case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break;
4269 case X86::SUB16ri: NewOpcode = X86::CMP16ri; break;
4270 case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break;
4271 case X86::SUB8ri: NewOpcode = X86::CMP8ri; break;
4272 }
4273 CmpInstr.setDesc(get(NewOpcode));
4274 CmpInstr.RemoveOperand(0);
4275 // Mutating this instruction invalidates any debug data associated with it.
4276 CmpInstr.dropDebugNumber();
4277 // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
4278 if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
4279 NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
4280 return false;
4281 }
4282 }
4283
4284 // Get the unique definition of SrcReg.
4285 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
4286 if (!MI) return false;
4287
4288 // CmpInstr is the first instruction of the BB.
4289 MachineBasicBlock::iterator I = CmpInstr, Def = MI;
4290
4291 // If we are comparing against zero, check whether we can use MI to update
4292 // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
4293 bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
4294 if (IsCmpZero && MI->getParent() != CmpInstr.getParent())
4295 return false;
4296
4297 // If we have a use of the source register between the def and our compare
4298 // instruction we can eliminate the compare iff the use sets EFLAGS in the
4299 // right way.
4300 bool ShouldUpdateCC = false;
4301 bool NoSignFlag = false;
4302 bool ClearsOverflowFlag = false;
4303 X86::CondCode NewCC = X86::COND_INVALID;
4304 if (IsCmpZero && !isDefConvertible(*MI, NoSignFlag, ClearsOverflowFlag)) {
4305 // Scan forward from the use until we hit the use we're looking for or the
4306 // compare instruction.
4307 for (MachineBasicBlock::iterator J = MI;; ++J) {
4308 // Do we have a convertible instruction?
4309 NewCC = isUseDefConvertible(*J);
4310 if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
4311 J->getOperand(1).getReg() == SrcReg) {
4312 assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
4313 ShouldUpdateCC = true; // Update CC later on.
4314 // This is not a def of SrcReg, but still a def of EFLAGS. Keep going
4315 // with the new def.
4316 Def = J;
4317 MI = &*Def;
4318 break;
4319 }
4320
4321 if (J == I)
4322 return false;
4323 }
4324 }
4325
4326 // We are searching for an earlier instruction that can make CmpInstr
4327 // redundant and that instruction will be saved in Sub.
4328 MachineInstr *Sub = nullptr;
4329 const TargetRegisterInfo *TRI = &getRegisterInfo();
4330
4331 // We iterate backward, starting from the instruction before CmpInstr and
4332 // stop when reaching the definition of a source register or done with the BB.
4333 // RI points to the instruction before CmpInstr.
4334 // If the definition is in this basic block, RE points to the definition;
4335 // otherwise, RE is the rend of the basic block.
4336 MachineBasicBlock::reverse_iterator
4337 RI = ++I.getReverse(),
4338 RE = CmpInstr.getParent() == MI->getParent()
4339 ? Def.getReverse() /* points to MI */
4340 : CmpInstr.getParent()->rend();
4341 MachineInstr *Movr0Inst = nullptr;
4342 for (; RI != RE; ++RI) {
4343 MachineInstr &Instr = *RI;
4344 // Check whether CmpInstr can be made redundant by the current instruction.
4345 if (!IsCmpZero && isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask,
4346 CmpValue, Instr)) {
4347 Sub = &Instr;
4348 break;
4349 }
4350
4351 if (Instr.modifiesRegister(X86::EFLAGS, TRI) ||
4352 Instr.readsRegister(X86::EFLAGS, TRI)) {
4353 // This instruction modifies or uses EFLAGS.
4354
4355 // MOV32r0 etc. are implemented with xor which clobbers condition code.
4356 // They are safe to move up, if the definition to EFLAGS is dead and
4357 // earlier instructions do not read or write EFLAGS.
4358 if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 &&
4359 Instr.registerDefIsDead(X86::EFLAGS, TRI)) {
4360 Movr0Inst = &Instr;
4361 continue;
4362 }
4363
4364 // We can't remove CmpInstr.
4365 return false;
4366 }
4367 }
4368
4369 // Return false if no candidates exist.
4370 if (!IsCmpZero && !Sub)
4371 return false;
4372
4373 bool IsSwapped =
4374 (SrcReg2 != 0 && Sub && Sub->getOperand(1).getReg() == SrcReg2 &&
4375 Sub->getOperand(2).getReg() == SrcReg);
4376
4377 // Scan forward from the instruction after CmpInstr for uses of EFLAGS.
4378 // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
4379 // If we are done with the basic block, we need to check whether EFLAGS is
4380 // live-out.
4381 bool IsSafe = false;
4382 SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate;
4383 MachineBasicBlock::iterator E = CmpInstr.getParent()->end();
4384 for (++I; I != E; ++I) {
4385 const MachineInstr &Instr = *I;
4386 bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
4387 bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
4388 // We should check the usage if this instruction uses and updates EFLAGS.
4389 if (!UseEFLAGS && ModifyEFLAGS) {
4390 // It is safe to remove CmpInstr if EFLAGS is updated again.
4391 IsSafe = true;
4392 break;
4393 }
4394 if (!UseEFLAGS && !ModifyEFLAGS)
4395 continue;
4396
4397 // EFLAGS is used by this instruction.
4398 X86::CondCode OldCC = X86::COND_INVALID;
4399 if (IsCmpZero || IsSwapped) {
4400 // We decode the condition code from opcode.
4401 if (Instr.isBranch())
4402 OldCC = X86::getCondFromBranch(Instr);
4403 else {
4404 OldCC = X86::getCondFromSETCC(Instr);
4405 if (OldCC == X86::COND_INVALID)
4406 OldCC = X86::getCondFromCMov(Instr);
4407 }
4408 if (OldCC == X86::COND_INVALID) return false;
4409 }
4410 X86::CondCode ReplacementCC = X86::COND_INVALID;
4411 if (IsCmpZero) {
4412 switch (OldCC) {
4413 default: break;
4414 case X86::COND_A: case X86::COND_AE:
4415 case X86::COND_B: case X86::COND_BE:
4416 // CF is used, we can't perform this optimization.
4417 return false;
4418 case X86::COND_G: case X86::COND_GE:
4419 case X86::COND_L: case X86::COND_LE:
4420 case X86::COND_O: case X86::COND_NO:
4421 // If OF is used, the instruction needs to clear it like CmpZero does.
4422 if (!ClearsOverflowFlag)
4423 return false;
4424 break;
4425 case X86::COND_S: case X86::COND_NS:
4426 // If SF is used, but the instruction doesn't update the SF, then we
4427 // can't do the optimization.
4428 if (NoSignFlag)
4429 return false;
4430 break;
4431 }
4432
4433 // If we're updating the condition code check if we have to reverse the
4434 // condition.
4435 if (ShouldUpdateCC)
4436 switch (OldCC) {
4437 default:
4438 return false;
4439 case X86::COND_E:
4440 ReplacementCC = NewCC;
4441 break;
4442 case X86::COND_NE:
4443 ReplacementCC = GetOppositeBranchCondition(NewCC);
4444 break;
4445 }
4446 } else if (IsSwapped) {
4447 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
4448 // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
4449 // We swap the condition code and synthesize the new opcode.
4450 ReplacementCC = getSwappedCondition(OldCC);
4451 if (ReplacementCC == X86::COND_INVALID) return false;
4452 }
4453
4454 if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) {
4455 // Push the MachineInstr to OpsToUpdate.
4456 // If it is safe to remove CmpInstr, the condition code of these
4457 // instructions will be modified.
4458 OpsToUpdate.push_back(std::make_pair(&*I, ReplacementCC));
4459 }
4460 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
4461 // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
4462 IsSafe = true;
4463 break;
4464 }
4465 }
4466
4467 // If EFLAGS is not killed nor re-defined, we should check whether it is
4468 // live-out. If it is live-out, do not optimize.
4469 if ((IsCmpZero || IsSwapped) && !IsSafe) {
4470 MachineBasicBlock *MBB = CmpInstr.getParent();
4471 for (MachineBasicBlock *Successor : MBB->successors())
4472 if (Successor->isLiveIn(X86::EFLAGS))
4473 return false;
4474 }
4475
4476 // The instruction to be updated is either Sub or MI.
4477 Sub = IsCmpZero ? MI : Sub;
4478 // Move Movr0Inst to the appropriate place before Sub.
4479 if (Movr0Inst) {
4480 // Look backwards until we find a def that doesn't use the current EFLAGS.
4481 Def = Sub;
4482 MachineBasicBlock::reverse_iterator InsertI = Def.getReverse(),
4483 InsertE = Sub->getParent()->rend();
4484 for (; InsertI != InsertE; ++InsertI) {
4485 MachineInstr *Instr = &*InsertI;
4486 if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
4487 Instr->modifiesRegister(X86::EFLAGS, TRI)) {
4488 Sub->getParent()->remove(Movr0Inst);
4489 Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
4490 Movr0Inst);
4491 break;
4492 }
4493 }
4494 if (InsertI == InsertE)
4495 return false;
4496 }
4497
4498 // Make sure Sub instruction defines EFLAGS and mark the def live.
4499 MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS);
4500 assert(FlagDef && "Unable to locate a def EFLAGS operand");
4501 FlagDef->setIsDead(false);
4502
4503 CmpInstr.eraseFromParent();
4504
4505 // Modify the condition code of instructions in OpsToUpdate.
4506 for (auto &Op : OpsToUpdate) {
4507 Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
4508 .setImm(Op.second);
4509 }
4510 return true;
4511 }
4512
4513 /// Try to remove the load by folding it to a register
4514 /// operand at the use. We fold the load instructions if load defines a virtual
4515 /// register, the virtual register is used once in the same BB, and the
4516 /// instructions in-between do not load or store, and have no side effects.
optimizeLoadInstr(MachineInstr & MI,const MachineRegisterInfo * MRI,Register & FoldAsLoadDefReg,MachineInstr * & DefMI) const4517 MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
4518 const MachineRegisterInfo *MRI,
4519 Register &FoldAsLoadDefReg,
4520 MachineInstr *&DefMI) const {
4521 // Check whether we can move DefMI here.
4522 DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
4523 assert(DefMI);
4524 bool SawStore = false;
4525 if (!DefMI->isSafeToMove(nullptr, SawStore))
4526 return nullptr;
4527
4528 // Collect information about virtual register operands of MI.
4529 SmallVector<unsigned, 1> SrcOperandIds;
4530 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
4531 MachineOperand &MO = MI.getOperand(i);
4532 if (!MO.isReg())
4533 continue;
4534 Register Reg = MO.getReg();
4535 if (Reg != FoldAsLoadDefReg)
4536 continue;
4537 // Do not fold if we have a subreg use or a def.
4538 if (MO.getSubReg() || MO.isDef())
4539 return nullptr;
4540 SrcOperandIds.push_back(i);
4541 }
4542 if (SrcOperandIds.empty())
4543 return nullptr;
4544
4545 // Check whether we can fold the def into SrcOperandId.
4546 if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
4547 FoldAsLoadDefReg = 0;
4548 return FoldMI;
4549 }
4550
4551 return nullptr;
4552 }
4553
4554 /// Expand a single-def pseudo instruction to a two-addr
4555 /// instruction with two undef reads of the register being defined.
4556 /// This is used for mapping:
4557 /// %xmm4 = V_SET0
4558 /// to:
4559 /// %xmm4 = PXORrr undef %xmm4, undef %xmm4
4560 ///
Expand2AddrUndef(MachineInstrBuilder & MIB,const MCInstrDesc & Desc)4561 static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
4562 const MCInstrDesc &Desc) {
4563 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
4564 Register Reg = MIB.getReg(0);
4565 MIB->setDesc(Desc);
4566
4567 // MachineInstr::addOperand() will insert explicit operands before any
4568 // implicit operands.
4569 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
4570 // But we don't trust that.
4571 assert(MIB.getReg(1) == Reg &&
4572 MIB.getReg(2) == Reg && "Misplaced operand");
4573 return true;
4574 }
4575
4576 /// Expand a single-def pseudo instruction to a two-addr
4577 /// instruction with two %k0 reads.
4578 /// This is used for mapping:
4579 /// %k4 = K_SET1
4580 /// to:
4581 /// %k4 = KXNORrr %k0, %k0
Expand2AddrKreg(MachineInstrBuilder & MIB,const MCInstrDesc & Desc,Register Reg)4582 static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
4583 Register Reg) {
4584 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
4585 MIB->setDesc(Desc);
4586 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
4587 return true;
4588 }
4589
expandMOV32r1(MachineInstrBuilder & MIB,const TargetInstrInfo & TII,bool MinusOne)4590 static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
4591 bool MinusOne) {
4592 MachineBasicBlock &MBB = *MIB->getParent();
4593 const DebugLoc &DL = MIB->getDebugLoc();
4594 Register Reg = MIB.getReg(0);
4595
4596 // Insert the XOR.
4597 BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg)
4598 .addReg(Reg, RegState::Undef)
4599 .addReg(Reg, RegState::Undef);
4600
4601 // Turn the pseudo into an INC or DEC.
4602 MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
4603 MIB.addReg(Reg);
4604
4605 return true;
4606 }
4607
ExpandMOVImmSExti8(MachineInstrBuilder & MIB,const TargetInstrInfo & TII,const X86Subtarget & Subtarget)4608 static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
4609 const TargetInstrInfo &TII,
4610 const X86Subtarget &Subtarget) {
4611 MachineBasicBlock &MBB = *MIB->getParent();
4612 const DebugLoc &DL = MIB->getDebugLoc();
4613 int64_t Imm = MIB->getOperand(1).getImm();
4614 assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
4615 MachineBasicBlock::iterator I = MIB.getInstr();
4616
4617 int StackAdjustment;
4618
4619 if (Subtarget.is64Bit()) {
4620 assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
4621 MIB->getOpcode() == X86::MOV32ImmSExti8);
4622
4623 // Can't use push/pop lowering if the function might write to the red zone.
4624 X86MachineFunctionInfo *X86FI =
4625 MBB.getParent()->getInfo<X86MachineFunctionInfo>();
4626 if (X86FI->getUsesRedZone()) {
4627 MIB->setDesc(TII.get(MIB->getOpcode() ==
4628 X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri));
4629 return true;
4630 }
4631
4632 // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
4633 // widen the register if necessary.
4634 StackAdjustment = 8;
4635 BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm);
4636 MIB->setDesc(TII.get(X86::POP64r));
4637 MIB->getOperand(0)
4638 .setReg(getX86SubSuperRegister(MIB.getReg(0), 64));
4639 } else {
4640 assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
4641 StackAdjustment = 4;
4642 BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm);
4643 MIB->setDesc(TII.get(X86::POP32r));
4644 }
4645 MIB->RemoveOperand(1);
4646 MIB->addImplicitDefUseOperands(*MBB.getParent());
4647
4648 // Build CFI if necessary.
4649 MachineFunction &MF = *MBB.getParent();
4650 const X86FrameLowering *TFL = Subtarget.getFrameLowering();
4651 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
4652 bool NeedsDwarfCFI = !IsWin64Prologue && MF.needsFrameMoves();
4653 bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
4654 if (EmitCFI) {
4655 TFL->BuildCFI(MBB, I, DL,
4656 MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
4657 TFL->BuildCFI(MBB, std::next(I), DL,
4658 MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
4659 }
4660
4661 return true;
4662 }
4663
4664 // LoadStackGuard has so far only been implemented for 64-bit MachO. Different
4665 // code sequence is needed for other targets.
expandLoadStackGuard(MachineInstrBuilder & MIB,const TargetInstrInfo & TII)4666 static void expandLoadStackGuard(MachineInstrBuilder &MIB,
4667 const TargetInstrInfo &TII) {
4668 MachineBasicBlock &MBB = *MIB->getParent();
4669 const DebugLoc &DL = MIB->getDebugLoc();
4670 Register Reg = MIB.getReg(0);
4671 const GlobalValue *GV =
4672 cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
4673 auto Flags = MachineMemOperand::MOLoad |
4674 MachineMemOperand::MODereferenceable |
4675 MachineMemOperand::MOInvariant;
4676 MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
4677 MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, Align(8));
4678 MachineBasicBlock::iterator I = MIB.getInstr();
4679
4680 BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
4681 .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
4682 .addMemOperand(MMO);
4683 MIB->setDebugLoc(DL);
4684 MIB->setDesc(TII.get(X86::MOV64rm));
4685 MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
4686 }
4687
expandXorFP(MachineInstrBuilder & MIB,const TargetInstrInfo & TII)4688 static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
4689 MachineBasicBlock &MBB = *MIB->getParent();
4690 MachineFunction &MF = *MBB.getParent();
4691 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
4692 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4693 unsigned XorOp =
4694 MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
4695 MIB->setDesc(TII.get(XorOp));
4696 MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
4697 return true;
4698 }
4699
4700 // This is used to handle spills for 128/256-bit registers when we have AVX512,
4701 // but not VLX. If it uses an extended register we need to use an instruction
4702 // that loads the lower 128/256-bit, but is available with only AVX512F.
expandNOVLXLoad(MachineInstrBuilder & MIB,const TargetRegisterInfo * TRI,const MCInstrDesc & LoadDesc,const MCInstrDesc & BroadcastDesc,unsigned SubIdx)4703 static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
4704 const TargetRegisterInfo *TRI,
4705 const MCInstrDesc &LoadDesc,
4706 const MCInstrDesc &BroadcastDesc,
4707 unsigned SubIdx) {
4708 Register DestReg = MIB.getReg(0);
4709 // Check if DestReg is XMM16-31 or YMM16-31.
4710 if (TRI->getEncodingValue(DestReg) < 16) {
4711 // We can use a normal VEX encoded load.
4712 MIB->setDesc(LoadDesc);
4713 } else {
4714 // Use a 128/256-bit VBROADCAST instruction.
4715 MIB->setDesc(BroadcastDesc);
4716 // Change the destination to a 512-bit register.
4717 DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
4718 MIB->getOperand(0).setReg(DestReg);
4719 }
4720 return true;
4721 }
4722
4723 // This is used to handle spills for 128/256-bit registers when we have AVX512,
4724 // but not VLX. If it uses an extended register we need to use an instruction
4725 // that stores the lower 128/256-bit, but is available with only AVX512F.
expandNOVLXStore(MachineInstrBuilder & MIB,const TargetRegisterInfo * TRI,const MCInstrDesc & StoreDesc,const MCInstrDesc & ExtractDesc,unsigned SubIdx)4726 static bool expandNOVLXStore(MachineInstrBuilder &MIB,
4727 const TargetRegisterInfo *TRI,
4728 const MCInstrDesc &StoreDesc,
4729 const MCInstrDesc &ExtractDesc,
4730 unsigned SubIdx) {
4731 Register SrcReg = MIB.getReg(X86::AddrNumOperands);
4732 // Check if DestReg is XMM16-31 or YMM16-31.
4733 if (TRI->getEncodingValue(SrcReg) < 16) {
4734 // We can use a normal VEX encoded store.
4735 MIB->setDesc(StoreDesc);
4736 } else {
4737 // Use a VEXTRACTF instruction.
4738 MIB->setDesc(ExtractDesc);
4739 // Change the destination to a 512-bit register.
4740 SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
4741 MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
4742 MIB.addImm(0x0); // Append immediate to extract from the lower bits.
4743 }
4744
4745 return true;
4746 }
4747
expandSHXDROT(MachineInstrBuilder & MIB,const MCInstrDesc & Desc)4748 static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) {
4749 MIB->setDesc(Desc);
4750 int64_t ShiftAmt = MIB->getOperand(2).getImm();
4751 // Temporarily remove the immediate so we can add another source register.
4752 MIB->RemoveOperand(2);
4753 // Add the register. Don't copy the kill flag if there is one.
4754 MIB.addReg(MIB.getReg(1),
4755 getUndefRegState(MIB->getOperand(1).isUndef()));
4756 // Add back the immediate.
4757 MIB.addImm(ShiftAmt);
4758 return true;
4759 }
4760
expandPostRAPseudo(MachineInstr & MI) const4761 bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
4762 bool HasAVX = Subtarget.hasAVX();
4763 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
4764 switch (MI.getOpcode()) {
4765 case X86::MOV32r0:
4766 return Expand2AddrUndef(MIB, get(X86::XOR32rr));
4767 case X86::MOV32r1:
4768 return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
4769 case X86::MOV32r_1:
4770 return expandMOV32r1(MIB, *this, /*MinusOne=*/ true);
4771 case X86::MOV32ImmSExti8:
4772 case X86::MOV64ImmSExti8:
4773 return ExpandMOVImmSExti8(MIB, *this, Subtarget);
4774 case X86::SETB_C32r:
4775 return Expand2AddrUndef(MIB, get(X86::SBB32rr));
4776 case X86::SETB_C64r:
4777 return Expand2AddrUndef(MIB, get(X86::SBB64rr));
4778 case X86::MMX_SET0:
4779 return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr));
4780 case X86::V_SET0:
4781 case X86::FsFLD0SS:
4782 case X86::FsFLD0SD:
4783 case X86::FsFLD0F128:
4784 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
4785 case X86::AVX_SET0: {
4786 assert(HasAVX && "AVX not supported");
4787 const TargetRegisterInfo *TRI = &getRegisterInfo();
4788 Register SrcReg = MIB.getReg(0);
4789 Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
4790 MIB->getOperand(0).setReg(XReg);
4791 Expand2AddrUndef(MIB, get(X86::VXORPSrr));
4792 MIB.addReg(SrcReg, RegState::ImplicitDefine);
4793 return true;
4794 }
4795 case X86::AVX512_128_SET0:
4796 case X86::AVX512_FsFLD0SH:
4797 case X86::AVX512_FsFLD0SS:
4798 case X86::AVX512_FsFLD0SD:
4799 case X86::AVX512_FsFLD0F128: {
4800 bool HasVLX = Subtarget.hasVLX();
4801 Register SrcReg = MIB.getReg(0);
4802 const TargetRegisterInfo *TRI = &getRegisterInfo();
4803 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16)
4804 return Expand2AddrUndef(MIB,
4805 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
4806 // Extended register without VLX. Use a larger XOR.
4807 SrcReg =
4808 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
4809 MIB->getOperand(0).setReg(SrcReg);
4810 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
4811 }
4812 case X86::AVX512_256_SET0:
4813 case X86::AVX512_512_SET0: {
4814 bool HasVLX = Subtarget.hasVLX();
4815 Register SrcReg = MIB.getReg(0);
4816 const TargetRegisterInfo *TRI = &getRegisterInfo();
4817 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) {
4818 Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
4819 MIB->getOperand(0).setReg(XReg);
4820 Expand2AddrUndef(MIB,
4821 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
4822 MIB.addReg(SrcReg, RegState::ImplicitDefine);
4823 return true;
4824 }
4825 if (MI.getOpcode() == X86::AVX512_256_SET0) {
4826 // No VLX so we must reference a zmm.
4827 unsigned ZReg =
4828 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
4829 MIB->getOperand(0).setReg(ZReg);
4830 }
4831 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
4832 }
4833 case X86::V_SETALLONES:
4834 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
4835 case X86::AVX2_SETALLONES:
4836 return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
4837 case X86::AVX1_SETALLONES: {
4838 Register Reg = MIB.getReg(0);
4839 // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS.
4840 MIB->setDesc(get(X86::VCMPPSYrri));
4841 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf);
4842 return true;
4843 }
4844 case X86::AVX512_512_SETALLONES: {
4845 Register Reg = MIB.getReg(0);
4846 MIB->setDesc(get(X86::VPTERNLOGDZrri));
4847 // VPTERNLOGD needs 3 register inputs and an immediate.
4848 // 0xff will return 1s for any input.
4849 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef)
4850 .addReg(Reg, RegState::Undef).addImm(0xff);
4851 return true;
4852 }
4853 case X86::AVX512_512_SEXT_MASK_32:
4854 case X86::AVX512_512_SEXT_MASK_64: {
4855 Register Reg = MIB.getReg(0);
4856 Register MaskReg = MIB.getReg(1);
4857 unsigned MaskState = getRegState(MIB->getOperand(1));
4858 unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ?
4859 X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz;
4860 MI.RemoveOperand(1);
4861 MIB->setDesc(get(Opc));
4862 // VPTERNLOG needs 3 register inputs and an immediate.
4863 // 0xff will return 1s for any input.
4864 MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState)
4865 .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff);
4866 return true;
4867 }
4868 case X86::VMOVAPSZ128rm_NOVLX:
4869 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm),
4870 get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
4871 case X86::VMOVUPSZ128rm_NOVLX:
4872 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm),
4873 get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
4874 case X86::VMOVAPSZ256rm_NOVLX:
4875 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm),
4876 get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
4877 case X86::VMOVUPSZ256rm_NOVLX:
4878 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm),
4879 get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
4880 case X86::VMOVAPSZ128mr_NOVLX:
4881 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
4882 get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
4883 case X86::VMOVUPSZ128mr_NOVLX:
4884 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
4885 get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
4886 case X86::VMOVAPSZ256mr_NOVLX:
4887 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
4888 get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
4889 case X86::VMOVUPSZ256mr_NOVLX:
4890 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
4891 get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
4892 case X86::MOV32ri64: {
4893 Register Reg = MIB.getReg(0);
4894 Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
4895 MI.setDesc(get(X86::MOV32ri));
4896 MIB->getOperand(0).setReg(Reg32);
4897 MIB.addReg(Reg, RegState::ImplicitDefine);
4898 return true;
4899 }
4900
4901 // KNL does not recognize dependency-breaking idioms for mask registers,
4902 // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1.
4903 // Using %k0 as the undef input register is a performance heuristic based
4904 // on the assumption that %k0 is used less frequently than the other mask
4905 // registers, since it is not usable as a write mask.
4906 // FIXME: A more advanced approach would be to choose the best input mask
4907 // register based on context.
4908 case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0);
4909 case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0);
4910 case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0);
4911 case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0);
4912 case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0);
4913 case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0);
4914 case TargetOpcode::LOAD_STACK_GUARD:
4915 expandLoadStackGuard(MIB, *this);
4916 return true;
4917 case X86::XOR64_FP:
4918 case X86::XOR32_FP:
4919 return expandXorFP(MIB, *this);
4920 case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8));
4921 case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8));
4922 case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8));
4923 case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8));
4924 case X86::ADD8rr_DB: MIB->setDesc(get(X86::OR8rr)); break;
4925 case X86::ADD16rr_DB: MIB->setDesc(get(X86::OR16rr)); break;
4926 case X86::ADD32rr_DB: MIB->setDesc(get(X86::OR32rr)); break;
4927 case X86::ADD64rr_DB: MIB->setDesc(get(X86::OR64rr)); break;
4928 case X86::ADD8ri_DB: MIB->setDesc(get(X86::OR8ri)); break;
4929 case X86::ADD16ri_DB: MIB->setDesc(get(X86::OR16ri)); break;
4930 case X86::ADD32ri_DB: MIB->setDesc(get(X86::OR32ri)); break;
4931 case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break;
4932 case X86::ADD16ri8_DB: MIB->setDesc(get(X86::OR16ri8)); break;
4933 case X86::ADD32ri8_DB: MIB->setDesc(get(X86::OR32ri8)); break;
4934 case X86::ADD64ri8_DB: MIB->setDesc(get(X86::OR64ri8)); break;
4935 }
4936 return false;
4937 }
4938
4939 /// Return true for all instructions that only update
4940 /// the first 32 or 64-bits of the destination register and leave the rest
4941 /// unmodified. This can be used to avoid folding loads if the instructions
4942 /// only update part of the destination register, and the non-updated part is
4943 /// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these
4944 /// instructions breaks the partial register dependency and it can improve
4945 /// performance. e.g.:
4946 ///
4947 /// movss (%rdi), %xmm0
4948 /// cvtss2sd %xmm0, %xmm0
4949 ///
4950 /// Instead of
4951 /// cvtss2sd (%rdi), %xmm0
4952 ///
4953 /// FIXME: This should be turned into a TSFlags.
4954 ///
hasPartialRegUpdate(unsigned Opcode,const X86Subtarget & Subtarget,bool ForLoadFold=false)4955 static bool hasPartialRegUpdate(unsigned Opcode,
4956 const X86Subtarget &Subtarget,
4957 bool ForLoadFold = false) {
4958 switch (Opcode) {
4959 case X86::CVTSI2SSrr:
4960 case X86::CVTSI2SSrm:
4961 case X86::CVTSI642SSrr:
4962 case X86::CVTSI642SSrm:
4963 case X86::CVTSI2SDrr:
4964 case X86::CVTSI2SDrm:
4965 case X86::CVTSI642SDrr:
4966 case X86::CVTSI642SDrm:
4967 // Load folding won't effect the undef register update since the input is
4968 // a GPR.
4969 return !ForLoadFold;
4970 case X86::CVTSD2SSrr:
4971 case X86::CVTSD2SSrm:
4972 case X86::CVTSS2SDrr:
4973 case X86::CVTSS2SDrm:
4974 case X86::MOVHPDrm:
4975 case X86::MOVHPSrm:
4976 case X86::MOVLPDrm:
4977 case X86::MOVLPSrm:
4978 case X86::RCPSSr:
4979 case X86::RCPSSm:
4980 case X86::RCPSSr_Int:
4981 case X86::RCPSSm_Int:
4982 case X86::ROUNDSDr:
4983 case X86::ROUNDSDm:
4984 case X86::ROUNDSSr:
4985 case X86::ROUNDSSm:
4986 case X86::RSQRTSSr:
4987 case X86::RSQRTSSm:
4988 case X86::RSQRTSSr_Int:
4989 case X86::RSQRTSSm_Int:
4990 case X86::SQRTSSr:
4991 case X86::SQRTSSm:
4992 case X86::SQRTSSr_Int:
4993 case X86::SQRTSSm_Int:
4994 case X86::SQRTSDr:
4995 case X86::SQRTSDm:
4996 case X86::SQRTSDr_Int:
4997 case X86::SQRTSDm_Int:
4998 return true;
4999 // GPR
5000 case X86::POPCNT32rm:
5001 case X86::POPCNT32rr:
5002 case X86::POPCNT64rm:
5003 case X86::POPCNT64rr:
5004 return Subtarget.hasPOPCNTFalseDeps();
5005 case X86::LZCNT32rm:
5006 case X86::LZCNT32rr:
5007 case X86::LZCNT64rm:
5008 case X86::LZCNT64rr:
5009 case X86::TZCNT32rm:
5010 case X86::TZCNT32rr:
5011 case X86::TZCNT64rm:
5012 case X86::TZCNT64rr:
5013 return Subtarget.hasLZCNTFalseDeps();
5014 }
5015
5016 return false;
5017 }
5018
5019 /// Inform the BreakFalseDeps pass how many idle
5020 /// instructions we would like before a partial register update.
getPartialRegUpdateClearance(const MachineInstr & MI,unsigned OpNum,const TargetRegisterInfo * TRI) const5021 unsigned X86InstrInfo::getPartialRegUpdateClearance(
5022 const MachineInstr &MI, unsigned OpNum,
5023 const TargetRegisterInfo *TRI) const {
5024 if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget))
5025 return 0;
5026
5027 // If MI is marked as reading Reg, the partial register update is wanted.
5028 const MachineOperand &MO = MI.getOperand(0);
5029 Register Reg = MO.getReg();
5030 if (Reg.isVirtual()) {
5031 if (MO.readsReg() || MI.readsVirtualRegister(Reg))
5032 return 0;
5033 } else {
5034 if (MI.readsRegister(Reg, TRI))
5035 return 0;
5036 }
5037
5038 // If any instructions in the clearance range are reading Reg, insert a
5039 // dependency breaking instruction, which is inexpensive and is likely to
5040 // be hidden in other instruction's cycles.
5041 return PartialRegUpdateClearance;
5042 }
5043
5044 // Return true for any instruction the copies the high bits of the first source
5045 // operand into the unused high bits of the destination operand.
5046 // Also returns true for instructions that have two inputs where one may
5047 // be undef and we want it to use the same register as the other input.
hasUndefRegUpdate(unsigned Opcode,unsigned OpNum,bool ForLoadFold=false)5048 static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
5049 bool ForLoadFold = false) {
5050 // Set the OpNum parameter to the first source operand.
5051 switch (Opcode) {
5052 case X86::MMX_PUNPCKHBWirr:
5053 case X86::MMX_PUNPCKHWDirr:
5054 case X86::MMX_PUNPCKHDQirr:
5055 case X86::MMX_PUNPCKLBWirr:
5056 case X86::MMX_PUNPCKLWDirr:
5057 case X86::MMX_PUNPCKLDQirr:
5058 case X86::MOVHLPSrr:
5059 case X86::PACKSSWBrr:
5060 case X86::PACKUSWBrr:
5061 case X86::PACKSSDWrr:
5062 case X86::PACKUSDWrr:
5063 case X86::PUNPCKHBWrr:
5064 case X86::PUNPCKLBWrr:
5065 case X86::PUNPCKHWDrr:
5066 case X86::PUNPCKLWDrr:
5067 case X86::PUNPCKHDQrr:
5068 case X86::PUNPCKLDQrr:
5069 case X86::PUNPCKHQDQrr:
5070 case X86::PUNPCKLQDQrr:
5071 case X86::SHUFPDrri:
5072 case X86::SHUFPSrri:
5073 // These instructions are sometimes used with an undef first or second
5074 // source. Return true here so BreakFalseDeps will assign this source to the
5075 // same register as the first source to avoid a false dependency.
5076 // Operand 1 of these instructions is tied so they're separate from their
5077 // VEX counterparts.
5078 return OpNum == 2 && !ForLoadFold;
5079
5080 case X86::VMOVLHPSrr:
5081 case X86::VMOVLHPSZrr:
5082 case X86::VPACKSSWBrr:
5083 case X86::VPACKUSWBrr:
5084 case X86::VPACKSSDWrr:
5085 case X86::VPACKUSDWrr:
5086 case X86::VPACKSSWBZ128rr:
5087 case X86::VPACKUSWBZ128rr:
5088 case X86::VPACKSSDWZ128rr:
5089 case X86::VPACKUSDWZ128rr:
5090 case X86::VPERM2F128rr:
5091 case X86::VPERM2I128rr:
5092 case X86::VSHUFF32X4Z256rri:
5093 case X86::VSHUFF32X4Zrri:
5094 case X86::VSHUFF64X2Z256rri:
5095 case X86::VSHUFF64X2Zrri:
5096 case X86::VSHUFI32X4Z256rri:
5097 case X86::VSHUFI32X4Zrri:
5098 case X86::VSHUFI64X2Z256rri:
5099 case X86::VSHUFI64X2Zrri:
5100 case X86::VPUNPCKHBWrr:
5101 case X86::VPUNPCKLBWrr:
5102 case X86::VPUNPCKHBWYrr:
5103 case X86::VPUNPCKLBWYrr:
5104 case X86::VPUNPCKHBWZ128rr:
5105 case X86::VPUNPCKLBWZ128rr:
5106 case X86::VPUNPCKHBWZ256rr:
5107 case X86::VPUNPCKLBWZ256rr:
5108 case X86::VPUNPCKHBWZrr:
5109 case X86::VPUNPCKLBWZrr:
5110 case X86::VPUNPCKHWDrr:
5111 case X86::VPUNPCKLWDrr:
5112 case X86::VPUNPCKHWDYrr:
5113 case X86::VPUNPCKLWDYrr:
5114 case X86::VPUNPCKHWDZ128rr:
5115 case X86::VPUNPCKLWDZ128rr:
5116 case X86::VPUNPCKHWDZ256rr:
5117 case X86::VPUNPCKLWDZ256rr:
5118 case X86::VPUNPCKHWDZrr:
5119 case X86::VPUNPCKLWDZrr:
5120 case X86::VPUNPCKHDQrr:
5121 case X86::VPUNPCKLDQrr:
5122 case X86::VPUNPCKHDQYrr:
5123 case X86::VPUNPCKLDQYrr:
5124 case X86::VPUNPCKHDQZ128rr:
5125 case X86::VPUNPCKLDQZ128rr:
5126 case X86::VPUNPCKHDQZ256rr:
5127 case X86::VPUNPCKLDQZ256rr:
5128 case X86::VPUNPCKHDQZrr:
5129 case X86::VPUNPCKLDQZrr:
5130 case X86::VPUNPCKHQDQrr:
5131 case X86::VPUNPCKLQDQrr:
5132 case X86::VPUNPCKHQDQYrr:
5133 case X86::VPUNPCKLQDQYrr:
5134 case X86::VPUNPCKHQDQZ128rr:
5135 case X86::VPUNPCKLQDQZ128rr:
5136 case X86::VPUNPCKHQDQZ256rr:
5137 case X86::VPUNPCKLQDQZ256rr:
5138 case X86::VPUNPCKHQDQZrr:
5139 case X86::VPUNPCKLQDQZrr:
5140 // These instructions are sometimes used with an undef first or second
5141 // source. Return true here so BreakFalseDeps will assign this source to the
5142 // same register as the first source to avoid a false dependency.
5143 return (OpNum == 1 || OpNum == 2) && !ForLoadFold;
5144
5145 case X86::VCVTSI2SSrr:
5146 case X86::VCVTSI2SSrm:
5147 case X86::VCVTSI2SSrr_Int:
5148 case X86::VCVTSI2SSrm_Int:
5149 case X86::VCVTSI642SSrr:
5150 case X86::VCVTSI642SSrm:
5151 case X86::VCVTSI642SSrr_Int:
5152 case X86::VCVTSI642SSrm_Int:
5153 case X86::VCVTSI2SDrr:
5154 case X86::VCVTSI2SDrm:
5155 case X86::VCVTSI2SDrr_Int:
5156 case X86::VCVTSI2SDrm_Int:
5157 case X86::VCVTSI642SDrr:
5158 case X86::VCVTSI642SDrm:
5159 case X86::VCVTSI642SDrr_Int:
5160 case X86::VCVTSI642SDrm_Int:
5161 // AVX-512
5162 case X86::VCVTSI2SSZrr:
5163 case X86::VCVTSI2SSZrm:
5164 case X86::VCVTSI2SSZrr_Int:
5165 case X86::VCVTSI2SSZrrb_Int:
5166 case X86::VCVTSI2SSZrm_Int:
5167 case X86::VCVTSI642SSZrr:
5168 case X86::VCVTSI642SSZrm:
5169 case X86::VCVTSI642SSZrr_Int:
5170 case X86::VCVTSI642SSZrrb_Int:
5171 case X86::VCVTSI642SSZrm_Int:
5172 case X86::VCVTSI2SDZrr:
5173 case X86::VCVTSI2SDZrm:
5174 case X86::VCVTSI2SDZrr_Int:
5175 case X86::VCVTSI2SDZrm_Int:
5176 case X86::VCVTSI642SDZrr:
5177 case X86::VCVTSI642SDZrm:
5178 case X86::VCVTSI642SDZrr_Int:
5179 case X86::VCVTSI642SDZrrb_Int:
5180 case X86::VCVTSI642SDZrm_Int:
5181 case X86::VCVTUSI2SSZrr:
5182 case X86::VCVTUSI2SSZrm:
5183 case X86::VCVTUSI2SSZrr_Int:
5184 case X86::VCVTUSI2SSZrrb_Int:
5185 case X86::VCVTUSI2SSZrm_Int:
5186 case X86::VCVTUSI642SSZrr:
5187 case X86::VCVTUSI642SSZrm:
5188 case X86::VCVTUSI642SSZrr_Int:
5189 case X86::VCVTUSI642SSZrrb_Int:
5190 case X86::VCVTUSI642SSZrm_Int:
5191 case X86::VCVTUSI2SDZrr:
5192 case X86::VCVTUSI2SDZrm:
5193 case X86::VCVTUSI2SDZrr_Int:
5194 case X86::VCVTUSI2SDZrm_Int:
5195 case X86::VCVTUSI642SDZrr:
5196 case X86::VCVTUSI642SDZrm:
5197 case X86::VCVTUSI642SDZrr_Int:
5198 case X86::VCVTUSI642SDZrrb_Int:
5199 case X86::VCVTUSI642SDZrm_Int:
5200 case X86::VCVTSI2SHZrr:
5201 case X86::VCVTSI2SHZrm:
5202 case X86::VCVTSI2SHZrr_Int:
5203 case X86::VCVTSI2SHZrrb_Int:
5204 case X86::VCVTSI2SHZrm_Int:
5205 case X86::VCVTSI642SHZrr:
5206 case X86::VCVTSI642SHZrm:
5207 case X86::VCVTSI642SHZrr_Int:
5208 case X86::VCVTSI642SHZrrb_Int:
5209 case X86::VCVTSI642SHZrm_Int:
5210 case X86::VCVTUSI2SHZrr:
5211 case X86::VCVTUSI2SHZrm:
5212 case X86::VCVTUSI2SHZrr_Int:
5213 case X86::VCVTUSI2SHZrrb_Int:
5214 case X86::VCVTUSI2SHZrm_Int:
5215 case X86::VCVTUSI642SHZrr:
5216 case X86::VCVTUSI642SHZrm:
5217 case X86::VCVTUSI642SHZrr_Int:
5218 case X86::VCVTUSI642SHZrrb_Int:
5219 case X86::VCVTUSI642SHZrm_Int:
5220 // Load folding won't effect the undef register update since the input is
5221 // a GPR.
5222 return OpNum == 1 && !ForLoadFold;
5223 case X86::VCVTSD2SSrr:
5224 case X86::VCVTSD2SSrm:
5225 case X86::VCVTSD2SSrr_Int:
5226 case X86::VCVTSD2SSrm_Int:
5227 case X86::VCVTSS2SDrr:
5228 case X86::VCVTSS2SDrm:
5229 case X86::VCVTSS2SDrr_Int:
5230 case X86::VCVTSS2SDrm_Int:
5231 case X86::VRCPSSr:
5232 case X86::VRCPSSr_Int:
5233 case X86::VRCPSSm:
5234 case X86::VRCPSSm_Int:
5235 case X86::VROUNDSDr:
5236 case X86::VROUNDSDm:
5237 case X86::VROUNDSDr_Int:
5238 case X86::VROUNDSDm_Int:
5239 case X86::VROUNDSSr:
5240 case X86::VROUNDSSm:
5241 case X86::VROUNDSSr_Int:
5242 case X86::VROUNDSSm_Int:
5243 case X86::VRSQRTSSr:
5244 case X86::VRSQRTSSr_Int:
5245 case X86::VRSQRTSSm:
5246 case X86::VRSQRTSSm_Int:
5247 case X86::VSQRTSSr:
5248 case X86::VSQRTSSr_Int:
5249 case X86::VSQRTSSm:
5250 case X86::VSQRTSSm_Int:
5251 case X86::VSQRTSDr:
5252 case X86::VSQRTSDr_Int:
5253 case X86::VSQRTSDm:
5254 case X86::VSQRTSDm_Int:
5255 // AVX-512
5256 case X86::VCVTSD2SSZrr:
5257 case X86::VCVTSD2SSZrr_Int:
5258 case X86::VCVTSD2SSZrrb_Int:
5259 case X86::VCVTSD2SSZrm:
5260 case X86::VCVTSD2SSZrm_Int:
5261 case X86::VCVTSS2SDZrr:
5262 case X86::VCVTSS2SDZrr_Int:
5263 case X86::VCVTSS2SDZrrb_Int:
5264 case X86::VCVTSS2SDZrm:
5265 case X86::VCVTSS2SDZrm_Int:
5266 case X86::VGETEXPSDZr:
5267 case X86::VGETEXPSDZrb:
5268 case X86::VGETEXPSDZm:
5269 case X86::VGETEXPSSZr:
5270 case X86::VGETEXPSSZrb:
5271 case X86::VGETEXPSSZm:
5272 case X86::VGETMANTSDZrri:
5273 case X86::VGETMANTSDZrrib:
5274 case X86::VGETMANTSDZrmi:
5275 case X86::VGETMANTSSZrri:
5276 case X86::VGETMANTSSZrrib:
5277 case X86::VGETMANTSSZrmi:
5278 case X86::VRNDSCALESDZr:
5279 case X86::VRNDSCALESDZr_Int:
5280 case X86::VRNDSCALESDZrb_Int:
5281 case X86::VRNDSCALESDZm:
5282 case X86::VRNDSCALESDZm_Int:
5283 case X86::VRNDSCALESSZr:
5284 case X86::VRNDSCALESSZr_Int:
5285 case X86::VRNDSCALESSZrb_Int:
5286 case X86::VRNDSCALESSZm:
5287 case X86::VRNDSCALESSZm_Int:
5288 case X86::VRCP14SDZrr:
5289 case X86::VRCP14SDZrm:
5290 case X86::VRCP14SSZrr:
5291 case X86::VRCP14SSZrm:
5292 case X86::VRCPSHZrr:
5293 case X86::VRCPSHZrm:
5294 case X86::VRSQRTSHZrr:
5295 case X86::VRSQRTSHZrm:
5296 case X86::VREDUCESHZrmi:
5297 case X86::VREDUCESHZrri:
5298 case X86::VREDUCESHZrrib:
5299 case X86::VGETEXPSHZr:
5300 case X86::VGETEXPSHZrb:
5301 case X86::VGETEXPSHZm:
5302 case X86::VGETMANTSHZrri:
5303 case X86::VGETMANTSHZrrib:
5304 case X86::VGETMANTSHZrmi:
5305 case X86::VRNDSCALESHZr:
5306 case X86::VRNDSCALESHZr_Int:
5307 case X86::VRNDSCALESHZrb_Int:
5308 case X86::VRNDSCALESHZm:
5309 case X86::VRNDSCALESHZm_Int:
5310 case X86::VSQRTSHZr:
5311 case X86::VSQRTSHZr_Int:
5312 case X86::VSQRTSHZrb_Int:
5313 case X86::VSQRTSHZm:
5314 case X86::VSQRTSHZm_Int:
5315 case X86::VRCP28SDZr:
5316 case X86::VRCP28SDZrb:
5317 case X86::VRCP28SDZm:
5318 case X86::VRCP28SSZr:
5319 case X86::VRCP28SSZrb:
5320 case X86::VRCP28SSZm:
5321 case X86::VREDUCESSZrmi:
5322 case X86::VREDUCESSZrri:
5323 case X86::VREDUCESSZrrib:
5324 case X86::VRSQRT14SDZrr:
5325 case X86::VRSQRT14SDZrm:
5326 case X86::VRSQRT14SSZrr:
5327 case X86::VRSQRT14SSZrm:
5328 case X86::VRSQRT28SDZr:
5329 case X86::VRSQRT28SDZrb:
5330 case X86::VRSQRT28SDZm:
5331 case X86::VRSQRT28SSZr:
5332 case X86::VRSQRT28SSZrb:
5333 case X86::VRSQRT28SSZm:
5334 case X86::VSQRTSSZr:
5335 case X86::VSQRTSSZr_Int:
5336 case X86::VSQRTSSZrb_Int:
5337 case X86::VSQRTSSZm:
5338 case X86::VSQRTSSZm_Int:
5339 case X86::VSQRTSDZr:
5340 case X86::VSQRTSDZr_Int:
5341 case X86::VSQRTSDZrb_Int:
5342 case X86::VSQRTSDZm:
5343 case X86::VSQRTSDZm_Int:
5344 case X86::VCVTSD2SHZrr:
5345 case X86::VCVTSD2SHZrr_Int:
5346 case X86::VCVTSD2SHZrrb_Int:
5347 case X86::VCVTSD2SHZrm:
5348 case X86::VCVTSD2SHZrm_Int:
5349 case X86::VCVTSS2SHZrr:
5350 case X86::VCVTSS2SHZrr_Int:
5351 case X86::VCVTSS2SHZrrb_Int:
5352 case X86::VCVTSS2SHZrm:
5353 case X86::VCVTSS2SHZrm_Int:
5354 case X86::VCVTSH2SDZrr:
5355 case X86::VCVTSH2SDZrr_Int:
5356 case X86::VCVTSH2SDZrrb_Int:
5357 case X86::VCVTSH2SDZrm:
5358 case X86::VCVTSH2SDZrm_Int:
5359 case X86::VCVTSH2SSZrr:
5360 case X86::VCVTSH2SSZrr_Int:
5361 case X86::VCVTSH2SSZrrb_Int:
5362 case X86::VCVTSH2SSZrm:
5363 case X86::VCVTSH2SSZrm_Int:
5364 return OpNum == 1;
5365 case X86::VMOVSSZrrk:
5366 case X86::VMOVSDZrrk:
5367 return OpNum == 3 && !ForLoadFold;
5368 case X86::VMOVSSZrrkz:
5369 case X86::VMOVSDZrrkz:
5370 return OpNum == 2 && !ForLoadFold;
5371 }
5372
5373 return false;
5374 }
5375
5376 /// Inform the BreakFalseDeps pass how many idle instructions we would like
5377 /// before certain undef register reads.
5378 ///
5379 /// This catches the VCVTSI2SD family of instructions:
5380 ///
5381 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
5382 ///
5383 /// We should to be careful *not* to catch VXOR idioms which are presumably
5384 /// handled specially in the pipeline:
5385 ///
5386 /// vxorps undef %xmm1, undef %xmm1, %xmm1
5387 ///
5388 /// Like getPartialRegUpdateClearance, this makes a strong assumption that the
5389 /// high bits that are passed-through are not live.
5390 unsigned
getUndefRegClearance(const MachineInstr & MI,unsigned OpNum,const TargetRegisterInfo * TRI) const5391 X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
5392 const TargetRegisterInfo *TRI) const {
5393 const MachineOperand &MO = MI.getOperand(OpNum);
5394 if (Register::isPhysicalRegister(MO.getReg()) &&
5395 hasUndefRegUpdate(MI.getOpcode(), OpNum))
5396 return UndefRegClearance;
5397
5398 return 0;
5399 }
5400
breakPartialRegDependency(MachineInstr & MI,unsigned OpNum,const TargetRegisterInfo * TRI) const5401 void X86InstrInfo::breakPartialRegDependency(
5402 MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
5403 Register Reg = MI.getOperand(OpNum).getReg();
5404 // If MI kills this register, the false dependence is already broken.
5405 if (MI.killsRegister(Reg, TRI))
5406 return;
5407
5408 if (X86::VR128RegClass.contains(Reg)) {
5409 // These instructions are all floating point domain, so xorps is the best
5410 // choice.
5411 unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
5412 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
5413 .addReg(Reg, RegState::Undef)
5414 .addReg(Reg, RegState::Undef);
5415 MI.addRegisterKilled(Reg, TRI, true);
5416 } else if (X86::VR256RegClass.contains(Reg)) {
5417 // Use vxorps to clear the full ymm register.
5418 // It wants to read and write the xmm sub-register.
5419 Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
5420 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
5421 .addReg(XReg, RegState::Undef)
5422 .addReg(XReg, RegState::Undef)
5423 .addReg(Reg, RegState::ImplicitDefine);
5424 MI.addRegisterKilled(Reg, TRI, true);
5425 } else if (X86::GR64RegClass.contains(Reg)) {
5426 // Using XOR32rr because it has shorter encoding and zeros up the upper bits
5427 // as well.
5428 Register XReg = TRI->getSubReg(Reg, X86::sub_32bit);
5429 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
5430 .addReg(XReg, RegState::Undef)
5431 .addReg(XReg, RegState::Undef)
5432 .addReg(Reg, RegState::ImplicitDefine);
5433 MI.addRegisterKilled(Reg, TRI, true);
5434 } else if (X86::GR32RegClass.contains(Reg)) {
5435 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg)
5436 .addReg(Reg, RegState::Undef)
5437 .addReg(Reg, RegState::Undef);
5438 MI.addRegisterKilled(Reg, TRI, true);
5439 }
5440 }
5441
addOperands(MachineInstrBuilder & MIB,ArrayRef<MachineOperand> MOs,int PtrOffset=0)5442 static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
5443 int PtrOffset = 0) {
5444 unsigned NumAddrOps = MOs.size();
5445
5446 if (NumAddrOps < 4) {
5447 // FrameIndex only - add an immediate offset (whether its zero or not).
5448 for (unsigned i = 0; i != NumAddrOps; ++i)
5449 MIB.add(MOs[i]);
5450 addOffset(MIB, PtrOffset);
5451 } else {
5452 // General Memory Addressing - we need to add any offset to an existing
5453 // offset.
5454 assert(MOs.size() == 5 && "Unexpected memory operand list length");
5455 for (unsigned i = 0; i != NumAddrOps; ++i) {
5456 const MachineOperand &MO = MOs[i];
5457 if (i == 3 && PtrOffset != 0) {
5458 MIB.addDisp(MO, PtrOffset);
5459 } else {
5460 MIB.add(MO);
5461 }
5462 }
5463 }
5464 }
5465
updateOperandRegConstraints(MachineFunction & MF,MachineInstr & NewMI,const TargetInstrInfo & TII)5466 static void updateOperandRegConstraints(MachineFunction &MF,
5467 MachineInstr &NewMI,
5468 const TargetInstrInfo &TII) {
5469 MachineRegisterInfo &MRI = MF.getRegInfo();
5470 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
5471
5472 for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
5473 MachineOperand &MO = NewMI.getOperand(Idx);
5474 // We only need to update constraints on virtual register operands.
5475 if (!MO.isReg())
5476 continue;
5477 Register Reg = MO.getReg();
5478 if (!Reg.isVirtual())
5479 continue;
5480
5481 auto *NewRC = MRI.constrainRegClass(
5482 Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF));
5483 if (!NewRC) {
5484 LLVM_DEBUG(
5485 dbgs() << "WARNING: Unable to update register constraint for operand "
5486 << Idx << " of instruction:\n";
5487 NewMI.dump(); dbgs() << "\n");
5488 }
5489 }
5490 }
5491
FuseTwoAddrInst(MachineFunction & MF,unsigned Opcode,ArrayRef<MachineOperand> MOs,MachineBasicBlock::iterator InsertPt,MachineInstr & MI,const TargetInstrInfo & TII)5492 static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
5493 ArrayRef<MachineOperand> MOs,
5494 MachineBasicBlock::iterator InsertPt,
5495 MachineInstr &MI,
5496 const TargetInstrInfo &TII) {
5497 // Create the base instruction with the memory operand as the first part.
5498 // Omit the implicit operands, something BuildMI can't do.
5499 MachineInstr *NewMI =
5500 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
5501 MachineInstrBuilder MIB(MF, NewMI);
5502 addOperands(MIB, MOs);
5503
5504 // Loop over the rest of the ri operands, converting them over.
5505 unsigned NumOps = MI.getDesc().getNumOperands() - 2;
5506 for (unsigned i = 0; i != NumOps; ++i) {
5507 MachineOperand &MO = MI.getOperand(i + 2);
5508 MIB.add(MO);
5509 }
5510 for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
5511 MachineOperand &MO = MI.getOperand(i);
5512 MIB.add(MO);
5513 }
5514
5515 updateOperandRegConstraints(MF, *NewMI, TII);
5516
5517 MachineBasicBlock *MBB = InsertPt->getParent();
5518 MBB->insert(InsertPt, NewMI);
5519
5520 return MIB;
5521 }
5522
FuseInst(MachineFunction & MF,unsigned Opcode,unsigned OpNo,ArrayRef<MachineOperand> MOs,MachineBasicBlock::iterator InsertPt,MachineInstr & MI,const TargetInstrInfo & TII,int PtrOffset=0)5523 static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
5524 unsigned OpNo, ArrayRef<MachineOperand> MOs,
5525 MachineBasicBlock::iterator InsertPt,
5526 MachineInstr &MI, const TargetInstrInfo &TII,
5527 int PtrOffset = 0) {
5528 // Omit the implicit operands, something BuildMI can't do.
5529 MachineInstr *NewMI =
5530 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
5531 MachineInstrBuilder MIB(MF, NewMI);
5532
5533 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
5534 MachineOperand &MO = MI.getOperand(i);
5535 if (i == OpNo) {
5536 assert(MO.isReg() && "Expected to fold into reg operand!");
5537 addOperands(MIB, MOs, PtrOffset);
5538 } else {
5539 MIB.add(MO);
5540 }
5541 }
5542
5543 updateOperandRegConstraints(MF, *NewMI, TII);
5544
5545 // Copy the NoFPExcept flag from the instruction we're fusing.
5546 if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
5547 NewMI->setFlag(MachineInstr::MIFlag::NoFPExcept);
5548
5549 MachineBasicBlock *MBB = InsertPt->getParent();
5550 MBB->insert(InsertPt, NewMI);
5551
5552 return MIB;
5553 }
5554
MakeM0Inst(const TargetInstrInfo & TII,unsigned Opcode,ArrayRef<MachineOperand> MOs,MachineBasicBlock::iterator InsertPt,MachineInstr & MI)5555 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
5556 ArrayRef<MachineOperand> MOs,
5557 MachineBasicBlock::iterator InsertPt,
5558 MachineInstr &MI) {
5559 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
5560 MI.getDebugLoc(), TII.get(Opcode));
5561 addOperands(MIB, MOs);
5562 return MIB.addImm(0);
5563 }
5564
foldMemoryOperandCustom(MachineFunction & MF,MachineInstr & MI,unsigned OpNum,ArrayRef<MachineOperand> MOs,MachineBasicBlock::iterator InsertPt,unsigned Size,Align Alignment) const5565 MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
5566 MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
5567 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
5568 unsigned Size, Align Alignment) const {
5569 switch (MI.getOpcode()) {
5570 case X86::INSERTPSrr:
5571 case X86::VINSERTPSrr:
5572 case X86::VINSERTPSZrr:
5573 // Attempt to convert the load of inserted vector into a fold load
5574 // of a single float.
5575 if (OpNum == 2) {
5576 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
5577 unsigned ZMask = Imm & 15;
5578 unsigned DstIdx = (Imm >> 4) & 3;
5579 unsigned SrcIdx = (Imm >> 6) & 3;
5580
5581 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5582 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5583 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5584 if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(4)) {
5585 int PtrOffset = SrcIdx * 4;
5586 unsigned NewImm = (DstIdx << 4) | ZMask;
5587 unsigned NewOpCode =
5588 (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm :
5589 (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm :
5590 X86::INSERTPSrm;
5591 MachineInstr *NewMI =
5592 FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
5593 NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
5594 return NewMI;
5595 }
5596 }
5597 break;
5598 case X86::MOVHLPSrr:
5599 case X86::VMOVHLPSrr:
5600 case X86::VMOVHLPSZrr:
5601 // Move the upper 64-bits of the second operand to the lower 64-bits.
5602 // To fold the load, adjust the pointer to the upper and use (V)MOVLPS.
5603 // TODO: In most cases AVX doesn't have a 8-byte alignment requirement.
5604 if (OpNum == 2) {
5605 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5606 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5607 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5608 if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
5609 unsigned NewOpCode =
5610 (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
5611 (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm :
5612 X86::MOVLPSrm;
5613 MachineInstr *NewMI =
5614 FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
5615 return NewMI;
5616 }
5617 }
5618 break;
5619 case X86::UNPCKLPDrr:
5620 // If we won't be able to fold this to the memory form of UNPCKL, use
5621 // MOVHPD instead. Done as custom because we can't have this in the load
5622 // table twice.
5623 if (OpNum == 2) {
5624 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5625 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
5626 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5627 if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
5628 MachineInstr *NewMI =
5629 FuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
5630 return NewMI;
5631 }
5632 }
5633 break;
5634 }
5635
5636 return nullptr;
5637 }
5638
shouldPreventUndefRegUpdateMemFold(MachineFunction & MF,MachineInstr & MI)5639 static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
5640 MachineInstr &MI) {
5641 if (!hasUndefRegUpdate(MI.getOpcode(), 1, /*ForLoadFold*/true) ||
5642 !MI.getOperand(1).isReg())
5643 return false;
5644
5645 // The are two cases we need to handle depending on where in the pipeline
5646 // the folding attempt is being made.
5647 // -Register has the undef flag set.
5648 // -Register is produced by the IMPLICIT_DEF instruction.
5649
5650 if (MI.getOperand(1).isUndef())
5651 return true;
5652
5653 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5654 MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg());
5655 return VRegDef && VRegDef->isImplicitDef();
5656 }
5657
foldMemoryOperandImpl(MachineFunction & MF,MachineInstr & MI,unsigned OpNum,ArrayRef<MachineOperand> MOs,MachineBasicBlock::iterator InsertPt,unsigned Size,Align Alignment,bool AllowCommute) const5658 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
5659 MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
5660 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
5661 unsigned Size, Align Alignment, bool AllowCommute) const {
5662 bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
5663 bool isTwoAddrFold = false;
5664
5665 // For CPUs that favor the register form of a call or push,
5666 // do not fold loads into calls or pushes, unless optimizing for size
5667 // aggressively.
5668 if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() &&
5669 (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
5670 MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
5671 MI.getOpcode() == X86::PUSH64r))
5672 return nullptr;
5673
5674 // Avoid partial and undef register update stalls unless optimizing for size.
5675 if (!MF.getFunction().hasOptSize() &&
5676 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5677 shouldPreventUndefRegUpdateMemFold(MF, MI)))
5678 return nullptr;
5679
5680 unsigned NumOps = MI.getDesc().getNumOperands();
5681 bool isTwoAddr =
5682 NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
5683
5684 // FIXME: AsmPrinter doesn't know how to handle
5685 // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
5686 if (MI.getOpcode() == X86::ADD32ri &&
5687 MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
5688 return nullptr;
5689
5690 // GOTTPOFF relocation loads can only be folded into add instructions.
5691 // FIXME: Need to exclude other relocations that only support specific
5692 // instructions.
5693 if (MOs.size() == X86::AddrNumOperands &&
5694 MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
5695 MI.getOpcode() != X86::ADD64rr)
5696 return nullptr;
5697
5698 MachineInstr *NewMI = nullptr;
5699
5700 // Attempt to fold any custom cases we have.
5701 if (MachineInstr *CustomMI = foldMemoryOperandCustom(
5702 MF, MI, OpNum, MOs, InsertPt, Size, Alignment))
5703 return CustomMI;
5704
5705 const X86MemoryFoldTableEntry *I = nullptr;
5706
5707 // Folding a memory location into the two-address part of a two-address
5708 // instruction is different than folding it other places. It requires
5709 // replacing the *two* registers with the memory location.
5710 if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() &&
5711 MI.getOperand(1).isReg() &&
5712 MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
5713 I = lookupTwoAddrFoldTable(MI.getOpcode());
5714 isTwoAddrFold = true;
5715 } else {
5716 if (OpNum == 0) {
5717 if (MI.getOpcode() == X86::MOV32r0) {
5718 NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
5719 if (NewMI)
5720 return NewMI;
5721 }
5722 }
5723
5724 I = lookupFoldTable(MI.getOpcode(), OpNum);
5725 }
5726
5727 if (I != nullptr) {
5728 unsigned Opcode = I->DstOp;
5729 bool FoldedLoad =
5730 isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_LOAD) || OpNum > 0;
5731 bool FoldedStore =
5732 isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_STORE);
5733 MaybeAlign MinAlign =
5734 decodeMaybeAlign((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT);
5735 if (MinAlign && Alignment < *MinAlign)
5736 return nullptr;
5737 bool NarrowToMOV32rm = false;
5738 if (Size) {
5739 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5740 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum,
5741 &RI, MF);
5742 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
5743 // Check if it's safe to fold the load. If the size of the object is
5744 // narrower than the load width, then it's not.
5745 // FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int.
5746 if (FoldedLoad && Size < RCSize) {
5747 // If this is a 64-bit load, but the spill slot is 32, then we can do
5748 // a 32-bit load which is implicitly zero-extended. This likely is
5749 // due to live interval analysis remat'ing a load from stack slot.
5750 if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
5751 return nullptr;
5752 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
5753 return nullptr;
5754 Opcode = X86::MOV32rm;
5755 NarrowToMOV32rm = true;
5756 }
5757 // For stores, make sure the size of the object is equal to the size of
5758 // the store. If the object is larger, the extra bits would be garbage. If
5759 // the object is smaller we might overwrite another object or fault.
5760 if (FoldedStore && Size != RCSize)
5761 return nullptr;
5762 }
5763
5764 if (isTwoAddrFold)
5765 NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
5766 else
5767 NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
5768
5769 if (NarrowToMOV32rm) {
5770 // If this is the special case where we use a MOV32rm to load a 32-bit
5771 // value and zero-extend the top bits. Change the destination register
5772 // to a 32-bit one.
5773 Register DstReg = NewMI->getOperand(0).getReg();
5774 if (DstReg.isPhysical())
5775 NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
5776 else
5777 NewMI->getOperand(0).setSubReg(X86::sub_32bit);
5778 }
5779 return NewMI;
5780 }
5781
5782 // If the instruction and target operand are commutable, commute the
5783 // instruction and try again.
5784 if (AllowCommute) {
5785 unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
5786 if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
5787 bool HasDef = MI.getDesc().getNumDefs();
5788 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
5789 Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
5790 Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
5791 bool Tied1 =
5792 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
5793 bool Tied2 =
5794 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
5795
5796 // If either of the commutable operands are tied to the destination
5797 // then we can not commute + fold.
5798 if ((HasDef && Reg0 == Reg1 && Tied1) ||
5799 (HasDef && Reg0 == Reg2 && Tied2))
5800 return nullptr;
5801
5802 MachineInstr *CommutedMI =
5803 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
5804 if (!CommutedMI) {
5805 // Unable to commute.
5806 return nullptr;
5807 }
5808 if (CommutedMI != &MI) {
5809 // New instruction. We can't fold from this.
5810 CommutedMI->eraseFromParent();
5811 return nullptr;
5812 }
5813
5814 // Attempt to fold with the commuted version of the instruction.
5815 NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, Size,
5816 Alignment, /*AllowCommute=*/false);
5817 if (NewMI)
5818 return NewMI;
5819
5820 // Folding failed again - undo the commute before returning.
5821 MachineInstr *UncommutedMI =
5822 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
5823 if (!UncommutedMI) {
5824 // Unable to commute.
5825 return nullptr;
5826 }
5827 if (UncommutedMI != &MI) {
5828 // New instruction. It doesn't need to be kept.
5829 UncommutedMI->eraseFromParent();
5830 return nullptr;
5831 }
5832
5833 // Return here to prevent duplicate fuse failure report.
5834 return nullptr;
5835 }
5836 }
5837
5838 // No fusion
5839 if (PrintFailedFusing && !MI.isCopy())
5840 dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
5841 return nullptr;
5842 }
5843
5844 MachineInstr *
foldMemoryOperandImpl(MachineFunction & MF,MachineInstr & MI,ArrayRef<unsigned> Ops,MachineBasicBlock::iterator InsertPt,int FrameIndex,LiveIntervals * LIS,VirtRegMap * VRM) const5845 X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
5846 ArrayRef<unsigned> Ops,
5847 MachineBasicBlock::iterator InsertPt,
5848 int FrameIndex, LiveIntervals *LIS,
5849 VirtRegMap *VRM) const {
5850 // Check switch flag
5851 if (NoFusing)
5852 return nullptr;
5853
5854 // Avoid partial and undef register update stalls unless optimizing for size.
5855 if (!MF.getFunction().hasOptSize() &&
5856 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5857 shouldPreventUndefRegUpdateMemFold(MF, MI)))
5858 return nullptr;
5859
5860 // Don't fold subreg spills, or reloads that use a high subreg.
5861 for (auto Op : Ops) {
5862 MachineOperand &MO = MI.getOperand(Op);
5863 auto SubReg = MO.getSubReg();
5864 if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi))
5865 return nullptr;
5866 }
5867
5868 const MachineFrameInfo &MFI = MF.getFrameInfo();
5869 unsigned Size = MFI.getObjectSize(FrameIndex);
5870 Align Alignment = MFI.getObjectAlign(FrameIndex);
5871 // If the function stack isn't realigned we don't want to fold instructions
5872 // that need increased alignment.
5873 if (!RI.hasStackRealignment(MF))
5874 Alignment =
5875 std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign());
5876 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
5877 unsigned NewOpc = 0;
5878 unsigned RCSize = 0;
5879 switch (MI.getOpcode()) {
5880 default: return nullptr;
5881 case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
5882 case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
5883 case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
5884 case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
5885 }
5886 // Check if it's safe to fold the load. If the size of the object is
5887 // narrower than the load width, then it's not.
5888 if (Size < RCSize)
5889 return nullptr;
5890 // Change to CMPXXri r, 0 first.
5891 MI.setDesc(get(NewOpc));
5892 MI.getOperand(1).ChangeToImmediate(0);
5893 } else if (Ops.size() != 1)
5894 return nullptr;
5895
5896 return foldMemoryOperandImpl(MF, MI, Ops[0],
5897 MachineOperand::CreateFI(FrameIndex), InsertPt,
5898 Size, Alignment, /*AllowCommute=*/true);
5899 }
5900
5901 /// Check if \p LoadMI is a partial register load that we can't fold into \p MI
5902 /// because the latter uses contents that wouldn't be defined in the folded
5903 /// version. For instance, this transformation isn't legal:
5904 /// movss (%rdi), %xmm0
5905 /// addps %xmm0, %xmm0
5906 /// ->
5907 /// addps (%rdi), %xmm0
5908 ///
5909 /// But this one is:
5910 /// movss (%rdi), %xmm0
5911 /// addss %xmm0, %xmm0
5912 /// ->
5913 /// addss (%rdi), %xmm0
5914 ///
isNonFoldablePartialRegisterLoad(const MachineInstr & LoadMI,const MachineInstr & UserMI,const MachineFunction & MF)5915 static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
5916 const MachineInstr &UserMI,
5917 const MachineFunction &MF) {
5918 unsigned Opc = LoadMI.getOpcode();
5919 unsigned UserOpc = UserMI.getOpcode();
5920 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5921 const TargetRegisterClass *RC =
5922 MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
5923 unsigned RegSize = TRI.getRegSizeInBits(*RC);
5924
5925 if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm ||
5926 Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt ||
5927 Opc == X86::VMOVSSZrm_alt) &&
5928 RegSize > 32) {
5929 // These instructions only load 32 bits, we can't fold them if the
5930 // destination register is wider than 32 bits (4 bytes), and its user
5931 // instruction isn't scalar (SS).
5932 switch (UserOpc) {
5933 case X86::CVTSS2SDrr_Int:
5934 case X86::VCVTSS2SDrr_Int:
5935 case X86::VCVTSS2SDZrr_Int:
5936 case X86::VCVTSS2SDZrr_Intk:
5937 case X86::VCVTSS2SDZrr_Intkz:
5938 case X86::CVTSS2SIrr_Int: case X86::CVTSS2SI64rr_Int:
5939 case X86::VCVTSS2SIrr_Int: case X86::VCVTSS2SI64rr_Int:
5940 case X86::VCVTSS2SIZrr_Int: case X86::VCVTSS2SI64Zrr_Int:
5941 case X86::CVTTSS2SIrr_Int: case X86::CVTTSS2SI64rr_Int:
5942 case X86::VCVTTSS2SIrr_Int: case X86::VCVTTSS2SI64rr_Int:
5943 case X86::VCVTTSS2SIZrr_Int: case X86::VCVTTSS2SI64Zrr_Int:
5944 case X86::VCVTSS2USIZrr_Int: case X86::VCVTSS2USI64Zrr_Int:
5945 case X86::VCVTTSS2USIZrr_Int: case X86::VCVTTSS2USI64Zrr_Int:
5946 case X86::RCPSSr_Int: case X86::VRCPSSr_Int:
5947 case X86::RSQRTSSr_Int: case X86::VRSQRTSSr_Int:
5948 case X86::ROUNDSSr_Int: case X86::VROUNDSSr_Int:
5949 case X86::COMISSrr_Int: case X86::VCOMISSrr_Int: case X86::VCOMISSZrr_Int:
5950 case X86::UCOMISSrr_Int:case X86::VUCOMISSrr_Int:case X86::VUCOMISSZrr_Int:
5951 case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int:
5952 case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int:
5953 case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int:
5954 case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int:
5955 case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int:
5956 case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int:
5957 case X86::SQRTSSr_Int: case X86::VSQRTSSr_Int: case X86::VSQRTSSZr_Int:
5958 case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int:
5959 case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz:
5960 case X86::VCMPSSZrr_Intk:
5961 case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz:
5962 case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz:
5963 case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz:
5964 case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz:
5965 case X86::VSQRTSSZr_Intk: case X86::VSQRTSSZr_Intkz:
5966 case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz:
5967 case X86::VFMADDSS4rr_Int: case X86::VFNMADDSS4rr_Int:
5968 case X86::VFMSUBSS4rr_Int: case X86::VFNMSUBSS4rr_Int:
5969 case X86::VFMADD132SSr_Int: case X86::VFNMADD132SSr_Int:
5970 case X86::VFMADD213SSr_Int: case X86::VFNMADD213SSr_Int:
5971 case X86::VFMADD231SSr_Int: case X86::VFNMADD231SSr_Int:
5972 case X86::VFMSUB132SSr_Int: case X86::VFNMSUB132SSr_Int:
5973 case X86::VFMSUB213SSr_Int: case X86::VFNMSUB213SSr_Int:
5974 case X86::VFMSUB231SSr_Int: case X86::VFNMSUB231SSr_Int:
5975 case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int:
5976 case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int:
5977 case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int:
5978 case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int:
5979 case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int:
5980 case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int:
5981 case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk:
5982 case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk:
5983 case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk:
5984 case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk:
5985 case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk:
5986 case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk:
5987 case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz:
5988 case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz:
5989 case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz:
5990 case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz:
5991 case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz:
5992 case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz:
5993 case X86::VFIXUPIMMSSZrri:
5994 case X86::VFIXUPIMMSSZrrik:
5995 case X86::VFIXUPIMMSSZrrikz:
5996 case X86::VFPCLASSSSZrr:
5997 case X86::VFPCLASSSSZrrk:
5998 case X86::VGETEXPSSZr:
5999 case X86::VGETEXPSSZrk:
6000 case X86::VGETEXPSSZrkz:
6001 case X86::VGETMANTSSZrri:
6002 case X86::VGETMANTSSZrrik:
6003 case X86::VGETMANTSSZrrikz:
6004 case X86::VRANGESSZrri:
6005 case X86::VRANGESSZrrik:
6006 case X86::VRANGESSZrrikz:
6007 case X86::VRCP14SSZrr:
6008 case X86::VRCP14SSZrrk:
6009 case X86::VRCP14SSZrrkz:
6010 case X86::VRCP28SSZr:
6011 case X86::VRCP28SSZrk:
6012 case X86::VRCP28SSZrkz:
6013 case X86::VREDUCESSZrri:
6014 case X86::VREDUCESSZrrik:
6015 case X86::VREDUCESSZrrikz:
6016 case X86::VRNDSCALESSZr_Int:
6017 case X86::VRNDSCALESSZr_Intk:
6018 case X86::VRNDSCALESSZr_Intkz:
6019 case X86::VRSQRT14SSZrr:
6020 case X86::VRSQRT14SSZrrk:
6021 case X86::VRSQRT14SSZrrkz:
6022 case X86::VRSQRT28SSZr:
6023 case X86::VRSQRT28SSZrk:
6024 case X86::VRSQRT28SSZrkz:
6025 case X86::VSCALEFSSZrr:
6026 case X86::VSCALEFSSZrrk:
6027 case X86::VSCALEFSSZrrkz:
6028 return false;
6029 default:
6030 return true;
6031 }
6032 }
6033
6034 if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm ||
6035 Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt ||
6036 Opc == X86::VMOVSDZrm_alt) &&
6037 RegSize > 64) {
6038 // These instructions only load 64 bits, we can't fold them if the
6039 // destination register is wider than 64 bits (8 bytes), and its user
6040 // instruction isn't scalar (SD).
6041 switch (UserOpc) {
6042 case X86::CVTSD2SSrr_Int:
6043 case X86::VCVTSD2SSrr_Int:
6044 case X86::VCVTSD2SSZrr_Int:
6045 case X86::VCVTSD2SSZrr_Intk:
6046 case X86::VCVTSD2SSZrr_Intkz:
6047 case X86::CVTSD2SIrr_Int: case X86::CVTSD2SI64rr_Int:
6048 case X86::VCVTSD2SIrr_Int: case X86::VCVTSD2SI64rr_Int:
6049 case X86::VCVTSD2SIZrr_Int: case X86::VCVTSD2SI64Zrr_Int:
6050 case X86::CVTTSD2SIrr_Int: case X86::CVTTSD2SI64rr_Int:
6051 case X86::VCVTTSD2SIrr_Int: case X86::VCVTTSD2SI64rr_Int:
6052 case X86::VCVTTSD2SIZrr_Int: case X86::VCVTTSD2SI64Zrr_Int:
6053 case X86::VCVTSD2USIZrr_Int: case X86::VCVTSD2USI64Zrr_Int:
6054 case X86::VCVTTSD2USIZrr_Int: case X86::VCVTTSD2USI64Zrr_Int:
6055 case X86::ROUNDSDr_Int: case X86::VROUNDSDr_Int:
6056 case X86::COMISDrr_Int: case X86::VCOMISDrr_Int: case X86::VCOMISDZrr_Int:
6057 case X86::UCOMISDrr_Int:case X86::VUCOMISDrr_Int:case X86::VUCOMISDZrr_Int:
6058 case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int:
6059 case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int:
6060 case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int:
6061 case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int:
6062 case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int:
6063 case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int:
6064 case X86::SQRTSDr_Int: case X86::VSQRTSDr_Int: case X86::VSQRTSDZr_Int:
6065 case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int:
6066 case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz:
6067 case X86::VCMPSDZrr_Intk:
6068 case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz:
6069 case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz:
6070 case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz:
6071 case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz:
6072 case X86::VSQRTSDZr_Intk: case X86::VSQRTSDZr_Intkz:
6073 case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz:
6074 case X86::VFMADDSD4rr_Int: case X86::VFNMADDSD4rr_Int:
6075 case X86::VFMSUBSD4rr_Int: case X86::VFNMSUBSD4rr_Int:
6076 case X86::VFMADD132SDr_Int: case X86::VFNMADD132SDr_Int:
6077 case X86::VFMADD213SDr_Int: case X86::VFNMADD213SDr_Int:
6078 case X86::VFMADD231SDr_Int: case X86::VFNMADD231SDr_Int:
6079 case X86::VFMSUB132SDr_Int: case X86::VFNMSUB132SDr_Int:
6080 case X86::VFMSUB213SDr_Int: case X86::VFNMSUB213SDr_Int:
6081 case X86::VFMSUB231SDr_Int: case X86::VFNMSUB231SDr_Int:
6082 case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int:
6083 case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int:
6084 case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int:
6085 case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int:
6086 case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int:
6087 case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int:
6088 case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk:
6089 case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk:
6090 case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk:
6091 case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk:
6092 case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk:
6093 case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk:
6094 case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz:
6095 case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz:
6096 case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz:
6097 case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz:
6098 case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz:
6099 case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz:
6100 case X86::VFIXUPIMMSDZrri:
6101 case X86::VFIXUPIMMSDZrrik:
6102 case X86::VFIXUPIMMSDZrrikz:
6103 case X86::VFPCLASSSDZrr:
6104 case X86::VFPCLASSSDZrrk:
6105 case X86::VGETEXPSDZr:
6106 case X86::VGETEXPSDZrk:
6107 case X86::VGETEXPSDZrkz:
6108 case X86::VGETMANTSDZrri:
6109 case X86::VGETMANTSDZrrik:
6110 case X86::VGETMANTSDZrrikz:
6111 case X86::VRANGESDZrri:
6112 case X86::VRANGESDZrrik:
6113 case X86::VRANGESDZrrikz:
6114 case X86::VRCP14SDZrr:
6115 case X86::VRCP14SDZrrk:
6116 case X86::VRCP14SDZrrkz:
6117 case X86::VRCP28SDZr:
6118 case X86::VRCP28SDZrk:
6119 case X86::VRCP28SDZrkz:
6120 case X86::VREDUCESDZrri:
6121 case X86::VREDUCESDZrrik:
6122 case X86::VREDUCESDZrrikz:
6123 case X86::VRNDSCALESDZr_Int:
6124 case X86::VRNDSCALESDZr_Intk:
6125 case X86::VRNDSCALESDZr_Intkz:
6126 case X86::VRSQRT14SDZrr:
6127 case X86::VRSQRT14SDZrrk:
6128 case X86::VRSQRT14SDZrrkz:
6129 case X86::VRSQRT28SDZr:
6130 case X86::VRSQRT28SDZrk:
6131 case X86::VRSQRT28SDZrkz:
6132 case X86::VSCALEFSDZrr:
6133 case X86::VSCALEFSDZrrk:
6134 case X86::VSCALEFSDZrrkz:
6135 return false;
6136 default:
6137 return true;
6138 }
6139 }
6140
6141 if ((Opc == X86::VMOVSHZrm || Opc == X86::VMOVSHZrm_alt) && RegSize > 16) {
6142 // These instructions only load 16 bits, we can't fold them if the
6143 // destination register is wider than 16 bits (2 bytes), and its user
6144 // instruction isn't scalar (SH).
6145 switch (UserOpc) {
6146 case X86::VADDSHZrr_Int:
6147 case X86::VCMPSHZrr_Int:
6148 case X86::VDIVSHZrr_Int:
6149 case X86::VMAXSHZrr_Int:
6150 case X86::VMINSHZrr_Int:
6151 case X86::VMULSHZrr_Int:
6152 case X86::VSUBSHZrr_Int:
6153 case X86::VADDSHZrr_Intk: case X86::VADDSHZrr_Intkz:
6154 case X86::VCMPSHZrr_Intk:
6155 case X86::VDIVSHZrr_Intk: case X86::VDIVSHZrr_Intkz:
6156 case X86::VMAXSHZrr_Intk: case X86::VMAXSHZrr_Intkz:
6157 case X86::VMINSHZrr_Intk: case X86::VMINSHZrr_Intkz:
6158 case X86::VMULSHZrr_Intk: case X86::VMULSHZrr_Intkz:
6159 case X86::VSUBSHZrr_Intk: case X86::VSUBSHZrr_Intkz:
6160 case X86::VFMADD132SHZr_Int: case X86::VFNMADD132SHZr_Int:
6161 case X86::VFMADD213SHZr_Int: case X86::VFNMADD213SHZr_Int:
6162 case X86::VFMADD231SHZr_Int: case X86::VFNMADD231SHZr_Int:
6163 case X86::VFMSUB132SHZr_Int: case X86::VFNMSUB132SHZr_Int:
6164 case X86::VFMSUB213SHZr_Int: case X86::VFNMSUB213SHZr_Int:
6165 case X86::VFMSUB231SHZr_Int: case X86::VFNMSUB231SHZr_Int:
6166 case X86::VFMADD132SHZr_Intk: case X86::VFNMADD132SHZr_Intk:
6167 case X86::VFMADD213SHZr_Intk: case X86::VFNMADD213SHZr_Intk:
6168 case X86::VFMADD231SHZr_Intk: case X86::VFNMADD231SHZr_Intk:
6169 case X86::VFMSUB132SHZr_Intk: case X86::VFNMSUB132SHZr_Intk:
6170 case X86::VFMSUB213SHZr_Intk: case X86::VFNMSUB213SHZr_Intk:
6171 case X86::VFMSUB231SHZr_Intk: case X86::VFNMSUB231SHZr_Intk:
6172 case X86::VFMADD132SHZr_Intkz: case X86::VFNMADD132SHZr_Intkz:
6173 case X86::VFMADD213SHZr_Intkz: case X86::VFNMADD213SHZr_Intkz:
6174 case X86::VFMADD231SHZr_Intkz: case X86::VFNMADD231SHZr_Intkz:
6175 case X86::VFMSUB132SHZr_Intkz: case X86::VFNMSUB132SHZr_Intkz:
6176 case X86::VFMSUB213SHZr_Intkz: case X86::VFNMSUB213SHZr_Intkz:
6177 case X86::VFMSUB231SHZr_Intkz: case X86::VFNMSUB231SHZr_Intkz:
6178 return false;
6179 default:
6180 return true;
6181 }
6182 }
6183
6184 return false;
6185 }
6186
foldMemoryOperandImpl(MachineFunction & MF,MachineInstr & MI,ArrayRef<unsigned> Ops,MachineBasicBlock::iterator InsertPt,MachineInstr & LoadMI,LiveIntervals * LIS) const6187 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
6188 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
6189 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
6190 LiveIntervals *LIS) const {
6191
6192 // TODO: Support the case where LoadMI loads a wide register, but MI
6193 // only uses a subreg.
6194 for (auto Op : Ops) {
6195 if (MI.getOperand(Op).getSubReg())
6196 return nullptr;
6197 }
6198
6199 // If loading from a FrameIndex, fold directly from the FrameIndex.
6200 unsigned NumOps = LoadMI.getDesc().getNumOperands();
6201 int FrameIndex;
6202 if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
6203 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
6204 return nullptr;
6205 return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
6206 }
6207
6208 // Check switch flag
6209 if (NoFusing) return nullptr;
6210
6211 // Avoid partial and undef register update stalls unless optimizing for size.
6212 if (!MF.getFunction().hasOptSize() &&
6213 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
6214 shouldPreventUndefRegUpdateMemFold(MF, MI)))
6215 return nullptr;
6216
6217 // Determine the alignment of the load.
6218 Align Alignment;
6219 if (LoadMI.hasOneMemOperand())
6220 Alignment = (*LoadMI.memoperands_begin())->getAlign();
6221 else
6222 switch (LoadMI.getOpcode()) {
6223 case X86::AVX512_512_SET0:
6224 case X86::AVX512_512_SETALLONES:
6225 Alignment = Align(64);
6226 break;
6227 case X86::AVX2_SETALLONES:
6228 case X86::AVX1_SETALLONES:
6229 case X86::AVX_SET0:
6230 case X86::AVX512_256_SET0:
6231 Alignment = Align(32);
6232 break;
6233 case X86::V_SET0:
6234 case X86::V_SETALLONES:
6235 case X86::AVX512_128_SET0:
6236 case X86::FsFLD0F128:
6237 case X86::AVX512_FsFLD0F128:
6238 Alignment = Align(16);
6239 break;
6240 case X86::MMX_SET0:
6241 case X86::FsFLD0SD:
6242 case X86::AVX512_FsFLD0SD:
6243 Alignment = Align(8);
6244 break;
6245 case X86::FsFLD0SS:
6246 case X86::AVX512_FsFLD0SS:
6247 Alignment = Align(4);
6248 break;
6249 case X86::AVX512_FsFLD0SH:
6250 Alignment = Align(2);
6251 break;
6252 default:
6253 return nullptr;
6254 }
6255 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
6256 unsigned NewOpc = 0;
6257 switch (MI.getOpcode()) {
6258 default: return nullptr;
6259 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
6260 case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
6261 case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
6262 case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
6263 }
6264 // Change to CMPXXri r, 0 first.
6265 MI.setDesc(get(NewOpc));
6266 MI.getOperand(1).ChangeToImmediate(0);
6267 } else if (Ops.size() != 1)
6268 return nullptr;
6269
6270 // Make sure the subregisters match.
6271 // Otherwise we risk changing the size of the load.
6272 if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
6273 return nullptr;
6274
6275 SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
6276 switch (LoadMI.getOpcode()) {
6277 case X86::MMX_SET0:
6278 case X86::V_SET0:
6279 case X86::V_SETALLONES:
6280 case X86::AVX2_SETALLONES:
6281 case X86::AVX1_SETALLONES:
6282 case X86::AVX_SET0:
6283 case X86::AVX512_128_SET0:
6284 case X86::AVX512_256_SET0:
6285 case X86::AVX512_512_SET0:
6286 case X86::AVX512_512_SETALLONES:
6287 case X86::AVX512_FsFLD0SH:
6288 case X86::FsFLD0SD:
6289 case X86::AVX512_FsFLD0SD:
6290 case X86::FsFLD0SS:
6291 case X86::AVX512_FsFLD0SS:
6292 case X86::FsFLD0F128:
6293 case X86::AVX512_FsFLD0F128: {
6294 // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
6295 // Create a constant-pool entry and operands to load from it.
6296
6297 // Medium and large mode can't fold loads this way.
6298 if (MF.getTarget().getCodeModel() != CodeModel::Small &&
6299 MF.getTarget().getCodeModel() != CodeModel::Kernel)
6300 return nullptr;
6301
6302 // x86-32 PIC requires a PIC base register for constant pools.
6303 unsigned PICBase = 0;
6304 // Since we're using Small or Kernel code model, we can always use
6305 // RIP-relative addressing for a smaller encoding.
6306 if (Subtarget.is64Bit()) {
6307 PICBase = X86::RIP;
6308 } else if (MF.getTarget().isPositionIndependent()) {
6309 // FIXME: PICBase = getGlobalBaseReg(&MF);
6310 // This doesn't work for several reasons.
6311 // 1. GlobalBaseReg may have been spilled.
6312 // 2. It may not be live at MI.
6313 return nullptr;
6314 }
6315
6316 // Create a constant-pool entry.
6317 MachineConstantPool &MCP = *MF.getConstantPool();
6318 Type *Ty;
6319 unsigned Opc = LoadMI.getOpcode();
6320 if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
6321 Ty = Type::getFloatTy(MF.getFunction().getContext());
6322 else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
6323 Ty = Type::getDoubleTy(MF.getFunction().getContext());
6324 else if (Opc == X86::FsFLD0F128 || Opc == X86::AVX512_FsFLD0F128)
6325 Ty = Type::getFP128Ty(MF.getFunction().getContext());
6326 else if (Opc == X86::AVX512_FsFLD0SH)
6327 Ty = Type::getHalfTy(MF.getFunction().getContext());
6328 else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
6329 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6330 16);
6331 else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
6332 Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
6333 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6334 8);
6335 else if (Opc == X86::MMX_SET0)
6336 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6337 2);
6338 else
6339 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
6340 4);
6341
6342 bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
6343 Opc == X86::AVX512_512_SETALLONES ||
6344 Opc == X86::AVX1_SETALLONES);
6345 const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
6346 Constant::getNullValue(Ty);
6347 unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
6348
6349 // Create operands to load from the constant pool entry.
6350 MOs.push_back(MachineOperand::CreateReg(PICBase, false));
6351 MOs.push_back(MachineOperand::CreateImm(1));
6352 MOs.push_back(MachineOperand::CreateReg(0, false));
6353 MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
6354 MOs.push_back(MachineOperand::CreateReg(0, false));
6355 break;
6356 }
6357 default: {
6358 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
6359 return nullptr;
6360
6361 // Folding a normal load. Just copy the load's address operands.
6362 MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
6363 LoadMI.operands_begin() + NumOps);
6364 break;
6365 }
6366 }
6367 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
6368 /*Size=*/0, Alignment, /*AllowCommute=*/true);
6369 }
6370
6371 static SmallVector<MachineMemOperand *, 2>
extractLoadMMOs(ArrayRef<MachineMemOperand * > MMOs,MachineFunction & MF)6372 extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
6373 SmallVector<MachineMemOperand *, 2> LoadMMOs;
6374
6375 for (MachineMemOperand *MMO : MMOs) {
6376 if (!MMO->isLoad())
6377 continue;
6378
6379 if (!MMO->isStore()) {
6380 // Reuse the MMO.
6381 LoadMMOs.push_back(MMO);
6382 } else {
6383 // Clone the MMO and unset the store flag.
6384 LoadMMOs.push_back(MF.getMachineMemOperand(
6385 MMO, MMO->getFlags() & ~MachineMemOperand::MOStore));
6386 }
6387 }
6388
6389 return LoadMMOs;
6390 }
6391
6392 static SmallVector<MachineMemOperand *, 2>
extractStoreMMOs(ArrayRef<MachineMemOperand * > MMOs,MachineFunction & MF)6393 extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
6394 SmallVector<MachineMemOperand *, 2> StoreMMOs;
6395
6396 for (MachineMemOperand *MMO : MMOs) {
6397 if (!MMO->isStore())
6398 continue;
6399
6400 if (!MMO->isLoad()) {
6401 // Reuse the MMO.
6402 StoreMMOs.push_back(MMO);
6403 } else {
6404 // Clone the MMO and unset the load flag.
6405 StoreMMOs.push_back(MF.getMachineMemOperand(
6406 MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad));
6407 }
6408 }
6409
6410 return StoreMMOs;
6411 }
6412
getBroadcastOpcode(const X86MemoryFoldTableEntry * I,const TargetRegisterClass * RC,const X86Subtarget & STI)6413 static unsigned getBroadcastOpcode(const X86MemoryFoldTableEntry *I,
6414 const TargetRegisterClass *RC,
6415 const X86Subtarget &STI) {
6416 assert(STI.hasAVX512() && "Expected at least AVX512!");
6417 unsigned SpillSize = STI.getRegisterInfo()->getSpillSize(*RC);
6418 assert((SpillSize == 64 || STI.hasVLX()) &&
6419 "Can't broadcast less than 64 bytes without AVX512VL!");
6420
6421 switch (I->Flags & TB_BCAST_MASK) {
6422 default: llvm_unreachable("Unexpected broadcast type!");
6423 case TB_BCAST_D:
6424 switch (SpillSize) {
6425 default: llvm_unreachable("Unknown spill size");
6426 case 16: return X86::VPBROADCASTDZ128rm;
6427 case 32: return X86::VPBROADCASTDZ256rm;
6428 case 64: return X86::VPBROADCASTDZrm;
6429 }
6430 break;
6431 case TB_BCAST_Q:
6432 switch (SpillSize) {
6433 default: llvm_unreachable("Unknown spill size");
6434 case 16: return X86::VPBROADCASTQZ128rm;
6435 case 32: return X86::VPBROADCASTQZ256rm;
6436 case 64: return X86::VPBROADCASTQZrm;
6437 }
6438 break;
6439 case TB_BCAST_SS:
6440 switch (SpillSize) {
6441 default: llvm_unreachable("Unknown spill size");
6442 case 16: return X86::VBROADCASTSSZ128rm;
6443 case 32: return X86::VBROADCASTSSZ256rm;
6444 case 64: return X86::VBROADCASTSSZrm;
6445 }
6446 break;
6447 case TB_BCAST_SD:
6448 switch (SpillSize) {
6449 default: llvm_unreachable("Unknown spill size");
6450 case 16: return X86::VMOVDDUPZ128rm;
6451 case 32: return X86::VBROADCASTSDZ256rm;
6452 case 64: return X86::VBROADCASTSDZrm;
6453 }
6454 break;
6455 }
6456 }
6457
unfoldMemoryOperand(MachineFunction & MF,MachineInstr & MI,unsigned Reg,bool UnfoldLoad,bool UnfoldStore,SmallVectorImpl<MachineInstr * > & NewMIs) const6458 bool X86InstrInfo::unfoldMemoryOperand(
6459 MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
6460 bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
6461 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode());
6462 if (I == nullptr)
6463 return false;
6464 unsigned Opc = I->DstOp;
6465 unsigned Index = I->Flags & TB_INDEX_MASK;
6466 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6467 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6468 bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
6469 if (UnfoldLoad && !FoldedLoad)
6470 return false;
6471 UnfoldLoad &= FoldedLoad;
6472 if (UnfoldStore && !FoldedStore)
6473 return false;
6474 UnfoldStore &= FoldedStore;
6475
6476 const MCInstrDesc &MCID = get(Opc);
6477
6478 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
6479 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6480 // TODO: Check if 32-byte or greater accesses are slow too?
6481 if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
6482 Subtarget.isUnalignedMem16Slow())
6483 // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
6484 // conservatively assume the address is unaligned. That's bad for
6485 // performance.
6486 return false;
6487 SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
6488 SmallVector<MachineOperand,2> BeforeOps;
6489 SmallVector<MachineOperand,2> AfterOps;
6490 SmallVector<MachineOperand,4> ImpOps;
6491 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
6492 MachineOperand &Op = MI.getOperand(i);
6493 if (i >= Index && i < Index + X86::AddrNumOperands)
6494 AddrOps.push_back(Op);
6495 else if (Op.isReg() && Op.isImplicit())
6496 ImpOps.push_back(Op);
6497 else if (i < Index)
6498 BeforeOps.push_back(Op);
6499 else if (i > Index)
6500 AfterOps.push_back(Op);
6501 }
6502
6503 // Emit the load or broadcast instruction.
6504 if (UnfoldLoad) {
6505 auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
6506
6507 unsigned Opc;
6508 if (FoldedBCast) {
6509 Opc = getBroadcastOpcode(I, RC, Subtarget);
6510 } else {
6511 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6512 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6513 Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget);
6514 }
6515
6516 DebugLoc DL;
6517 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), Reg);
6518 for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
6519 MIB.add(AddrOps[i]);
6520 MIB.setMemRefs(MMOs);
6521 NewMIs.push_back(MIB);
6522
6523 if (UnfoldStore) {
6524 // Address operands cannot be marked isKill.
6525 for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
6526 MachineOperand &MO = NewMIs[0]->getOperand(i);
6527 if (MO.isReg())
6528 MO.setIsKill(false);
6529 }
6530 }
6531 }
6532
6533 // Emit the data processing instruction.
6534 MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
6535 MachineInstrBuilder MIB(MF, DataMI);
6536
6537 if (FoldedStore)
6538 MIB.addReg(Reg, RegState::Define);
6539 for (MachineOperand &BeforeOp : BeforeOps)
6540 MIB.add(BeforeOp);
6541 if (FoldedLoad)
6542 MIB.addReg(Reg);
6543 for (MachineOperand &AfterOp : AfterOps)
6544 MIB.add(AfterOp);
6545 for (MachineOperand &ImpOp : ImpOps) {
6546 MIB.addReg(ImpOp.getReg(),
6547 getDefRegState(ImpOp.isDef()) |
6548 RegState::Implicit |
6549 getKillRegState(ImpOp.isKill()) |
6550 getDeadRegState(ImpOp.isDead()) |
6551 getUndefRegState(ImpOp.isUndef()));
6552 }
6553 // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
6554 switch (DataMI->getOpcode()) {
6555 default: break;
6556 case X86::CMP64ri32:
6557 case X86::CMP64ri8:
6558 case X86::CMP32ri:
6559 case X86::CMP32ri8:
6560 case X86::CMP16ri:
6561 case X86::CMP16ri8:
6562 case X86::CMP8ri: {
6563 MachineOperand &MO0 = DataMI->getOperand(0);
6564 MachineOperand &MO1 = DataMI->getOperand(1);
6565 if (MO1.isImm() && MO1.getImm() == 0) {
6566 unsigned NewOpc;
6567 switch (DataMI->getOpcode()) {
6568 default: llvm_unreachable("Unreachable!");
6569 case X86::CMP64ri8:
6570 case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
6571 case X86::CMP32ri8:
6572 case X86::CMP32ri: NewOpc = X86::TEST32rr; break;
6573 case X86::CMP16ri8:
6574 case X86::CMP16ri: NewOpc = X86::TEST16rr; break;
6575 case X86::CMP8ri: NewOpc = X86::TEST8rr; break;
6576 }
6577 DataMI->setDesc(get(NewOpc));
6578 MO1.ChangeToRegister(MO0.getReg(), false);
6579 }
6580 }
6581 }
6582 NewMIs.push_back(DataMI);
6583
6584 // Emit the store instruction.
6585 if (UnfoldStore) {
6586 const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
6587 auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
6588 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
6589 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6590 unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget);
6591 DebugLoc DL;
6592 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
6593 for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
6594 MIB.add(AddrOps[i]);
6595 MIB.addReg(Reg, RegState::Kill);
6596 MIB.setMemRefs(MMOs);
6597 NewMIs.push_back(MIB);
6598 }
6599
6600 return true;
6601 }
6602
6603 bool
unfoldMemoryOperand(SelectionDAG & DAG,SDNode * N,SmallVectorImpl<SDNode * > & NewNodes) const6604 X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
6605 SmallVectorImpl<SDNode*> &NewNodes) const {
6606 if (!N->isMachineOpcode())
6607 return false;
6608
6609 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode());
6610 if (I == nullptr)
6611 return false;
6612 unsigned Opc = I->DstOp;
6613 unsigned Index = I->Flags & TB_INDEX_MASK;
6614 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6615 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6616 bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
6617 const MCInstrDesc &MCID = get(Opc);
6618 MachineFunction &MF = DAG.getMachineFunction();
6619 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6620 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
6621 unsigned NumDefs = MCID.NumDefs;
6622 std::vector<SDValue> AddrOps;
6623 std::vector<SDValue> BeforeOps;
6624 std::vector<SDValue> AfterOps;
6625 SDLoc dl(N);
6626 unsigned NumOps = N->getNumOperands();
6627 for (unsigned i = 0; i != NumOps-1; ++i) {
6628 SDValue Op = N->getOperand(i);
6629 if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
6630 AddrOps.push_back(Op);
6631 else if (i < Index-NumDefs)
6632 BeforeOps.push_back(Op);
6633 else if (i > Index-NumDefs)
6634 AfterOps.push_back(Op);
6635 }
6636 SDValue Chain = N->getOperand(NumOps-1);
6637 AddrOps.push_back(Chain);
6638
6639 // Emit the load instruction.
6640 SDNode *Load = nullptr;
6641 if (FoldedLoad) {
6642 EVT VT = *TRI.legalclasstypes_begin(*RC);
6643 auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
6644 if (MMOs.empty() && RC == &X86::VR128RegClass &&
6645 Subtarget.isUnalignedMem16Slow())
6646 // Do not introduce a slow unaligned load.
6647 return false;
6648 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
6649 // memory access is slow above.
6650
6651 unsigned Opc;
6652 if (FoldedBCast) {
6653 Opc = getBroadcastOpcode(I, RC, Subtarget);
6654 } else {
6655 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6656 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6657 Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget);
6658 }
6659
6660 Load = DAG.getMachineNode(Opc, dl, VT, MVT::Other, AddrOps);
6661 NewNodes.push_back(Load);
6662
6663 // Preserve memory reference information.
6664 DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs);
6665 }
6666
6667 // Emit the data processing instruction.
6668 std::vector<EVT> VTs;
6669 const TargetRegisterClass *DstRC = nullptr;
6670 if (MCID.getNumDefs() > 0) {
6671 DstRC = getRegClass(MCID, 0, &RI, MF);
6672 VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
6673 }
6674 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
6675 EVT VT = N->getValueType(i);
6676 if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
6677 VTs.push_back(VT);
6678 }
6679 if (Load)
6680 BeforeOps.push_back(SDValue(Load, 0));
6681 llvm::append_range(BeforeOps, AfterOps);
6682 // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
6683 switch (Opc) {
6684 default: break;
6685 case X86::CMP64ri32:
6686 case X86::CMP64ri8:
6687 case X86::CMP32ri:
6688 case X86::CMP32ri8:
6689 case X86::CMP16ri:
6690 case X86::CMP16ri8:
6691 case X86::CMP8ri:
6692 if (isNullConstant(BeforeOps[1])) {
6693 switch (Opc) {
6694 default: llvm_unreachable("Unreachable!");
6695 case X86::CMP64ri8:
6696 case X86::CMP64ri32: Opc = X86::TEST64rr; break;
6697 case X86::CMP32ri8:
6698 case X86::CMP32ri: Opc = X86::TEST32rr; break;
6699 case X86::CMP16ri8:
6700 case X86::CMP16ri: Opc = X86::TEST16rr; break;
6701 case X86::CMP8ri: Opc = X86::TEST8rr; break;
6702 }
6703 BeforeOps[1] = BeforeOps[0];
6704 }
6705 }
6706 SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
6707 NewNodes.push_back(NewNode);
6708
6709 // Emit the store instruction.
6710 if (FoldedStore) {
6711 AddrOps.pop_back();
6712 AddrOps.push_back(SDValue(NewNode, 0));
6713 AddrOps.push_back(Chain);
6714 auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
6715 if (MMOs.empty() && RC == &X86::VR128RegClass &&
6716 Subtarget.isUnalignedMem16Slow())
6717 // Do not introduce a slow unaligned store.
6718 return false;
6719 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
6720 // memory access is slow above.
6721 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
6722 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
6723 SDNode *Store =
6724 DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
6725 dl, MVT::Other, AddrOps);
6726 NewNodes.push_back(Store);
6727
6728 // Preserve memory reference information.
6729 DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs);
6730 }
6731
6732 return true;
6733 }
6734
getOpcodeAfterMemoryUnfold(unsigned Opc,bool UnfoldLoad,bool UnfoldStore,unsigned * LoadRegIndex) const6735 unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
6736 bool UnfoldLoad, bool UnfoldStore,
6737 unsigned *LoadRegIndex) const {
6738 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc);
6739 if (I == nullptr)
6740 return 0;
6741 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
6742 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
6743 if (UnfoldLoad && !FoldedLoad)
6744 return 0;
6745 if (UnfoldStore && !FoldedStore)
6746 return 0;
6747 if (LoadRegIndex)
6748 *LoadRegIndex = I->Flags & TB_INDEX_MASK;
6749 return I->DstOp;
6750 }
6751
6752 bool
areLoadsFromSameBasePtr(SDNode * Load1,SDNode * Load2,int64_t & Offset1,int64_t & Offset2) const6753 X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
6754 int64_t &Offset1, int64_t &Offset2) const {
6755 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
6756 return false;
6757 unsigned Opc1 = Load1->getMachineOpcode();
6758 unsigned Opc2 = Load2->getMachineOpcode();
6759 switch (Opc1) {
6760 default: return false;
6761 case X86::MOV8rm:
6762 case X86::MOV16rm:
6763 case X86::MOV32rm:
6764 case X86::MOV64rm:
6765 case X86::LD_Fp32m:
6766 case X86::LD_Fp64m:
6767 case X86::LD_Fp80m:
6768 case X86::MOVSSrm:
6769 case X86::MOVSSrm_alt:
6770 case X86::MOVSDrm:
6771 case X86::MOVSDrm_alt:
6772 case X86::MMX_MOVD64rm:
6773 case X86::MMX_MOVQ64rm:
6774 case X86::MOVAPSrm:
6775 case X86::MOVUPSrm:
6776 case X86::MOVAPDrm:
6777 case X86::MOVUPDrm:
6778 case X86::MOVDQArm:
6779 case X86::MOVDQUrm:
6780 // AVX load instructions
6781 case X86::VMOVSSrm:
6782 case X86::VMOVSSrm_alt:
6783 case X86::VMOVSDrm:
6784 case X86::VMOVSDrm_alt:
6785 case X86::VMOVAPSrm:
6786 case X86::VMOVUPSrm:
6787 case X86::VMOVAPDrm:
6788 case X86::VMOVUPDrm:
6789 case X86::VMOVDQArm:
6790 case X86::VMOVDQUrm:
6791 case X86::VMOVAPSYrm:
6792 case X86::VMOVUPSYrm:
6793 case X86::VMOVAPDYrm:
6794 case X86::VMOVUPDYrm:
6795 case X86::VMOVDQAYrm:
6796 case X86::VMOVDQUYrm:
6797 // AVX512 load instructions
6798 case X86::VMOVSSZrm:
6799 case X86::VMOVSSZrm_alt:
6800 case X86::VMOVSDZrm:
6801 case X86::VMOVSDZrm_alt:
6802 case X86::VMOVAPSZ128rm:
6803 case X86::VMOVUPSZ128rm:
6804 case X86::VMOVAPSZ128rm_NOVLX:
6805 case X86::VMOVUPSZ128rm_NOVLX:
6806 case X86::VMOVAPDZ128rm:
6807 case X86::VMOVUPDZ128rm:
6808 case X86::VMOVDQU8Z128rm:
6809 case X86::VMOVDQU16Z128rm:
6810 case X86::VMOVDQA32Z128rm:
6811 case X86::VMOVDQU32Z128rm:
6812 case X86::VMOVDQA64Z128rm:
6813 case X86::VMOVDQU64Z128rm:
6814 case X86::VMOVAPSZ256rm:
6815 case X86::VMOVUPSZ256rm:
6816 case X86::VMOVAPSZ256rm_NOVLX:
6817 case X86::VMOVUPSZ256rm_NOVLX:
6818 case X86::VMOVAPDZ256rm:
6819 case X86::VMOVUPDZ256rm:
6820 case X86::VMOVDQU8Z256rm:
6821 case X86::VMOVDQU16Z256rm:
6822 case X86::VMOVDQA32Z256rm:
6823 case X86::VMOVDQU32Z256rm:
6824 case X86::VMOVDQA64Z256rm:
6825 case X86::VMOVDQU64Z256rm:
6826 case X86::VMOVAPSZrm:
6827 case X86::VMOVUPSZrm:
6828 case X86::VMOVAPDZrm:
6829 case X86::VMOVUPDZrm:
6830 case X86::VMOVDQU8Zrm:
6831 case X86::VMOVDQU16Zrm:
6832 case X86::VMOVDQA32Zrm:
6833 case X86::VMOVDQU32Zrm:
6834 case X86::VMOVDQA64Zrm:
6835 case X86::VMOVDQU64Zrm:
6836 case X86::KMOVBkm:
6837 case X86::KMOVWkm:
6838 case X86::KMOVDkm:
6839 case X86::KMOVQkm:
6840 break;
6841 }
6842 switch (Opc2) {
6843 default: return false;
6844 case X86::MOV8rm:
6845 case X86::MOV16rm:
6846 case X86::MOV32rm:
6847 case X86::MOV64rm:
6848 case X86::LD_Fp32m:
6849 case X86::LD_Fp64m:
6850 case X86::LD_Fp80m:
6851 case X86::MOVSSrm:
6852 case X86::MOVSSrm_alt:
6853 case X86::MOVSDrm:
6854 case X86::MOVSDrm_alt:
6855 case X86::MMX_MOVD64rm:
6856 case X86::MMX_MOVQ64rm:
6857 case X86::MOVAPSrm:
6858 case X86::MOVUPSrm:
6859 case X86::MOVAPDrm:
6860 case X86::MOVUPDrm:
6861 case X86::MOVDQArm:
6862 case X86::MOVDQUrm:
6863 // AVX load instructions
6864 case X86::VMOVSSrm:
6865 case X86::VMOVSSrm_alt:
6866 case X86::VMOVSDrm:
6867 case X86::VMOVSDrm_alt:
6868 case X86::VMOVAPSrm:
6869 case X86::VMOVUPSrm:
6870 case X86::VMOVAPDrm:
6871 case X86::VMOVUPDrm:
6872 case X86::VMOVDQArm:
6873 case X86::VMOVDQUrm:
6874 case X86::VMOVAPSYrm:
6875 case X86::VMOVUPSYrm:
6876 case X86::VMOVAPDYrm:
6877 case X86::VMOVUPDYrm:
6878 case X86::VMOVDQAYrm:
6879 case X86::VMOVDQUYrm:
6880 // AVX512 load instructions
6881 case X86::VMOVSSZrm:
6882 case X86::VMOVSSZrm_alt:
6883 case X86::VMOVSDZrm:
6884 case X86::VMOVSDZrm_alt:
6885 case X86::VMOVAPSZ128rm:
6886 case X86::VMOVUPSZ128rm:
6887 case X86::VMOVAPSZ128rm_NOVLX:
6888 case X86::VMOVUPSZ128rm_NOVLX:
6889 case X86::VMOVAPDZ128rm:
6890 case X86::VMOVUPDZ128rm:
6891 case X86::VMOVDQU8Z128rm:
6892 case X86::VMOVDQU16Z128rm:
6893 case X86::VMOVDQA32Z128rm:
6894 case X86::VMOVDQU32Z128rm:
6895 case X86::VMOVDQA64Z128rm:
6896 case X86::VMOVDQU64Z128rm:
6897 case X86::VMOVAPSZ256rm:
6898 case X86::VMOVUPSZ256rm:
6899 case X86::VMOVAPSZ256rm_NOVLX:
6900 case X86::VMOVUPSZ256rm_NOVLX:
6901 case X86::VMOVAPDZ256rm:
6902 case X86::VMOVUPDZ256rm:
6903 case X86::VMOVDQU8Z256rm:
6904 case X86::VMOVDQU16Z256rm:
6905 case X86::VMOVDQA32Z256rm:
6906 case X86::VMOVDQU32Z256rm:
6907 case X86::VMOVDQA64Z256rm:
6908 case X86::VMOVDQU64Z256rm:
6909 case X86::VMOVAPSZrm:
6910 case X86::VMOVUPSZrm:
6911 case X86::VMOVAPDZrm:
6912 case X86::VMOVUPDZrm:
6913 case X86::VMOVDQU8Zrm:
6914 case X86::VMOVDQU16Zrm:
6915 case X86::VMOVDQA32Zrm:
6916 case X86::VMOVDQU32Zrm:
6917 case X86::VMOVDQA64Zrm:
6918 case X86::VMOVDQU64Zrm:
6919 case X86::KMOVBkm:
6920 case X86::KMOVWkm:
6921 case X86::KMOVDkm:
6922 case X86::KMOVQkm:
6923 break;
6924 }
6925
6926 // Lambda to check if both the loads have the same value for an operand index.
6927 auto HasSameOp = [&](int I) {
6928 return Load1->getOperand(I) == Load2->getOperand(I);
6929 };
6930
6931 // All operands except the displacement should match.
6932 if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) ||
6933 !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg))
6934 return false;
6935
6936 // Chain Operand must be the same.
6937 if (!HasSameOp(5))
6938 return false;
6939
6940 // Now let's examine if the displacements are constants.
6941 auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp));
6942 auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp));
6943 if (!Disp1 || !Disp2)
6944 return false;
6945
6946 Offset1 = Disp1->getSExtValue();
6947 Offset2 = Disp2->getSExtValue();
6948 return true;
6949 }
6950
shouldScheduleLoadsNear(SDNode * Load1,SDNode * Load2,int64_t Offset1,int64_t Offset2,unsigned NumLoads) const6951 bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
6952 int64_t Offset1, int64_t Offset2,
6953 unsigned NumLoads) const {
6954 assert(Offset2 > Offset1);
6955 if ((Offset2 - Offset1) / 8 > 64)
6956 return false;
6957
6958 unsigned Opc1 = Load1->getMachineOpcode();
6959 unsigned Opc2 = Load2->getMachineOpcode();
6960 if (Opc1 != Opc2)
6961 return false; // FIXME: overly conservative?
6962
6963 switch (Opc1) {
6964 default: break;
6965 case X86::LD_Fp32m:
6966 case X86::LD_Fp64m:
6967 case X86::LD_Fp80m:
6968 case X86::MMX_MOVD64rm:
6969 case X86::MMX_MOVQ64rm:
6970 return false;
6971 }
6972
6973 EVT VT = Load1->getValueType(0);
6974 switch (VT.getSimpleVT().SimpleTy) {
6975 default:
6976 // XMM registers. In 64-bit mode we can be a bit more aggressive since we
6977 // have 16 of them to play with.
6978 if (Subtarget.is64Bit()) {
6979 if (NumLoads >= 3)
6980 return false;
6981 } else if (NumLoads) {
6982 return false;
6983 }
6984 break;
6985 case MVT::i8:
6986 case MVT::i16:
6987 case MVT::i32:
6988 case MVT::i64:
6989 case MVT::f32:
6990 case MVT::f64:
6991 if (NumLoads)
6992 return false;
6993 break;
6994 }
6995
6996 return true;
6997 }
6998
isSchedulingBoundary(const MachineInstr & MI,const MachineBasicBlock * MBB,const MachineFunction & MF) const6999 bool X86InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
7000 const MachineBasicBlock *MBB,
7001 const MachineFunction &MF) const {
7002
7003 // ENDBR instructions should not be scheduled around.
7004 unsigned Opcode = MI.getOpcode();
7005 if (Opcode == X86::ENDBR64 || Opcode == X86::ENDBR32 ||
7006 Opcode == X86::LDTILECFG)
7007 return true;
7008
7009 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF);
7010 }
7011
7012 bool X86InstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> & Cond) const7013 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
7014 assert(Cond.size() == 1 && "Invalid X86 branch condition!");
7015 X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
7016 Cond[0].setImm(GetOppositeBranchCondition(CC));
7017 return false;
7018 }
7019
7020 bool X86InstrInfo::
isSafeToMoveRegClassDefs(const TargetRegisterClass * RC) const7021 isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
7022 // FIXME: Return false for x87 stack register classes for now. We can't
7023 // allow any loads of these registers before FpGet_ST0_80.
7024 return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
7025 RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
7026 RC == &X86::RFP80RegClass);
7027 }
7028
7029 /// Return a virtual register initialized with the
7030 /// the global base register value. Output instructions required to
7031 /// initialize the register in the function entry block, if necessary.
7032 ///
7033 /// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
7034 ///
getGlobalBaseReg(MachineFunction * MF) const7035 unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
7036 assert((!Subtarget.is64Bit() ||
7037 MF->getTarget().getCodeModel() == CodeModel::Medium ||
7038 MF->getTarget().getCodeModel() == CodeModel::Large) &&
7039 "X86-64 PIC uses RIP relative addressing");
7040
7041 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
7042 Register GlobalBaseReg = X86FI->getGlobalBaseReg();
7043 if (GlobalBaseReg != 0)
7044 return GlobalBaseReg;
7045
7046 // Create the register. The code to initialize it is inserted
7047 // later, by the CGBR pass (below).
7048 MachineRegisterInfo &RegInfo = MF->getRegInfo();
7049 GlobalBaseReg = RegInfo.createVirtualRegister(
7050 Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
7051 X86FI->setGlobalBaseReg(GlobalBaseReg);
7052 return GlobalBaseReg;
7053 }
7054
7055 // These are the replaceable SSE instructions. Some of these have Int variants
7056 // that we don't include here. We don't want to replace instructions selected
7057 // by intrinsics.
7058 static const uint16_t ReplaceableInstrs[][3] = {
7059 //PackedSingle PackedDouble PackedInt
7060 { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr },
7061 { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm },
7062 { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr },
7063 { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr },
7064 { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm },
7065 { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr },
7066 { X86::MOVSDmr, X86::MOVSDmr, X86::MOVPQI2QImr },
7067 { X86::MOVSSmr, X86::MOVSSmr, X86::MOVPDI2DImr },
7068 { X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm },
7069 { X86::MOVSDrm_alt,X86::MOVSDrm_alt,X86::MOVQI2PQIrm },
7070 { X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm },
7071 { X86::MOVSSrm_alt,X86::MOVSSrm_alt,X86::MOVDI2PDIrm },
7072 { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr },
7073 { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm },
7074 { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr },
7075 { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm },
7076 { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr },
7077 { X86::ORPSrm, X86::ORPDrm, X86::PORrm },
7078 { X86::ORPSrr, X86::ORPDrr, X86::PORrr },
7079 { X86::XORPSrm, X86::XORPDrm, X86::PXORrm },
7080 { X86::XORPSrr, X86::XORPDrr, X86::PXORrr },
7081 { X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm },
7082 { X86::MOVLHPSrr, X86::UNPCKLPDrr, X86::PUNPCKLQDQrr },
7083 { X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm },
7084 { X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr },
7085 { X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm },
7086 { X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr },
7087 { X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm },
7088 { X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr },
7089 { X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr },
7090 { X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr },
7091 // AVX 128-bit support
7092 { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr },
7093 { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm },
7094 { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr },
7095 { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr },
7096 { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm },
7097 { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr },
7098 { X86::VMOVSDmr, X86::VMOVSDmr, X86::VMOVPQI2QImr },
7099 { X86::VMOVSSmr, X86::VMOVSSmr, X86::VMOVPDI2DImr },
7100 { X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm },
7101 { X86::VMOVSDrm_alt,X86::VMOVSDrm_alt,X86::VMOVQI2PQIrm },
7102 { X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm },
7103 { X86::VMOVSSrm_alt,X86::VMOVSSrm_alt,X86::VMOVDI2PDIrm },
7104 { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
7105 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm },
7106 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr },
7107 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm },
7108 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr },
7109 { X86::VORPSrm, X86::VORPDrm, X86::VPORrm },
7110 { X86::VORPSrr, X86::VORPDrr, X86::VPORrr },
7111 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm },
7112 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr },
7113 { X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm },
7114 { X86::VMOVLHPSrr, X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr },
7115 { X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm },
7116 { X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr },
7117 { X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm },
7118 { X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr },
7119 { X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm },
7120 { X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr },
7121 { X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr },
7122 { X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr },
7123 // AVX 256-bit support
7124 { X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr },
7125 { X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm },
7126 { X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr },
7127 { X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr },
7128 { X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm },
7129 { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr },
7130 { X86::VPERMPSYrm, X86::VPERMPSYrm, X86::VPERMDYrm },
7131 { X86::VPERMPSYrr, X86::VPERMPSYrr, X86::VPERMDYrr },
7132 { X86::VPERMPDYmi, X86::VPERMPDYmi, X86::VPERMQYmi },
7133 { X86::VPERMPDYri, X86::VPERMPDYri, X86::VPERMQYri },
7134 // AVX512 support
7135 { X86::VMOVLPSZ128mr, X86::VMOVLPDZ128mr, X86::VMOVPQI2QIZmr },
7136 { X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr },
7137 { X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr },
7138 { X86::VMOVNTPSZmr, X86::VMOVNTPDZmr, X86::VMOVNTDQZmr },
7139 { X86::VMOVSDZmr, X86::VMOVSDZmr, X86::VMOVPQI2QIZmr },
7140 { X86::VMOVSSZmr, X86::VMOVSSZmr, X86::VMOVPDI2DIZmr },
7141 { X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm },
7142 { X86::VMOVSDZrm_alt, X86::VMOVSDZrm_alt, X86::VMOVQI2PQIZrm },
7143 { X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm },
7144 { X86::VMOVSSZrm_alt, X86::VMOVSSZrm_alt, X86::VMOVDI2PDIZrm },
7145 { X86::VBROADCASTSSZ128rr,X86::VBROADCASTSSZ128rr,X86::VPBROADCASTDZ128rr },
7146 { X86::VBROADCASTSSZ128rm,X86::VBROADCASTSSZ128rm,X86::VPBROADCASTDZ128rm },
7147 { X86::VBROADCASTSSZ256rr,X86::VBROADCASTSSZ256rr,X86::VPBROADCASTDZ256rr },
7148 { X86::VBROADCASTSSZ256rm,X86::VBROADCASTSSZ256rm,X86::VPBROADCASTDZ256rm },
7149 { X86::VBROADCASTSSZrr, X86::VBROADCASTSSZrr, X86::VPBROADCASTDZrr },
7150 { X86::VBROADCASTSSZrm, X86::VBROADCASTSSZrm, X86::VPBROADCASTDZrm },
7151 { X86::VMOVDDUPZ128rr, X86::VMOVDDUPZ128rr, X86::VPBROADCASTQZ128rr },
7152 { X86::VMOVDDUPZ128rm, X86::VMOVDDUPZ128rm, X86::VPBROADCASTQZ128rm },
7153 { X86::VBROADCASTSDZ256rr,X86::VBROADCASTSDZ256rr,X86::VPBROADCASTQZ256rr },
7154 { X86::VBROADCASTSDZ256rm,X86::VBROADCASTSDZ256rm,X86::VPBROADCASTQZ256rm },
7155 { X86::VBROADCASTSDZrr, X86::VBROADCASTSDZrr, X86::VPBROADCASTQZrr },
7156 { X86::VBROADCASTSDZrm, X86::VBROADCASTSDZrm, X86::VPBROADCASTQZrm },
7157 { X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrr, X86::VINSERTI32x4Zrr },
7158 { X86::VINSERTF32x4Zrm, X86::VINSERTF32x4Zrm, X86::VINSERTI32x4Zrm },
7159 { X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrr, X86::VINSERTI32x8Zrr },
7160 { X86::VINSERTF32x8Zrm, X86::VINSERTF32x8Zrm, X86::VINSERTI32x8Zrm },
7161 { X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrr, X86::VINSERTI64x2Zrr },
7162 { X86::VINSERTF64x2Zrm, X86::VINSERTF64x2Zrm, X86::VINSERTI64x2Zrm },
7163 { X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrr, X86::VINSERTI64x4Zrr },
7164 { X86::VINSERTF64x4Zrm, X86::VINSERTF64x4Zrm, X86::VINSERTI64x4Zrm },
7165 { X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr },
7166 { X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm },
7167 { X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr },
7168 { X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm },
7169 { X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zrr, X86::VEXTRACTI32x4Zrr },
7170 { X86::VEXTRACTF32x4Zmr, X86::VEXTRACTF32x4Zmr, X86::VEXTRACTI32x4Zmr },
7171 { X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zrr, X86::VEXTRACTI32x8Zrr },
7172 { X86::VEXTRACTF32x8Zmr, X86::VEXTRACTF32x8Zmr, X86::VEXTRACTI32x8Zmr },
7173 { X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zrr, X86::VEXTRACTI64x2Zrr },
7174 { X86::VEXTRACTF64x2Zmr, X86::VEXTRACTF64x2Zmr, X86::VEXTRACTI64x2Zmr },
7175 { X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zrr, X86::VEXTRACTI64x4Zrr },
7176 { X86::VEXTRACTF64x4Zmr, X86::VEXTRACTF64x4Zmr, X86::VEXTRACTI64x4Zmr },
7177 { X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr },
7178 { X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr },
7179 { X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr },
7180 { X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr },
7181 { X86::VPERMILPSmi, X86::VPERMILPSmi, X86::VPSHUFDmi },
7182 { X86::VPERMILPSri, X86::VPERMILPSri, X86::VPSHUFDri },
7183 { X86::VPERMILPSZ128mi, X86::VPERMILPSZ128mi, X86::VPSHUFDZ128mi },
7184 { X86::VPERMILPSZ128ri, X86::VPERMILPSZ128ri, X86::VPSHUFDZ128ri },
7185 { X86::VPERMILPSZ256mi, X86::VPERMILPSZ256mi, X86::VPSHUFDZ256mi },
7186 { X86::VPERMILPSZ256ri, X86::VPERMILPSZ256ri, X86::VPSHUFDZ256ri },
7187 { X86::VPERMILPSZmi, X86::VPERMILPSZmi, X86::VPSHUFDZmi },
7188 { X86::VPERMILPSZri, X86::VPERMILPSZri, X86::VPSHUFDZri },
7189 { X86::VPERMPSZ256rm, X86::VPERMPSZ256rm, X86::VPERMDZ256rm },
7190 { X86::VPERMPSZ256rr, X86::VPERMPSZ256rr, X86::VPERMDZ256rr },
7191 { X86::VPERMPDZ256mi, X86::VPERMPDZ256mi, X86::VPERMQZ256mi },
7192 { X86::VPERMPDZ256ri, X86::VPERMPDZ256ri, X86::VPERMQZ256ri },
7193 { X86::VPERMPDZ256rm, X86::VPERMPDZ256rm, X86::VPERMQZ256rm },
7194 { X86::VPERMPDZ256rr, X86::VPERMPDZ256rr, X86::VPERMQZ256rr },
7195 { X86::VPERMPSZrm, X86::VPERMPSZrm, X86::VPERMDZrm },
7196 { X86::VPERMPSZrr, X86::VPERMPSZrr, X86::VPERMDZrr },
7197 { X86::VPERMPDZmi, X86::VPERMPDZmi, X86::VPERMQZmi },
7198 { X86::VPERMPDZri, X86::VPERMPDZri, X86::VPERMQZri },
7199 { X86::VPERMPDZrm, X86::VPERMPDZrm, X86::VPERMQZrm },
7200 { X86::VPERMPDZrr, X86::VPERMPDZrr, X86::VPERMQZrr },
7201 { X86::VUNPCKLPDZ256rm, X86::VUNPCKLPDZ256rm, X86::VPUNPCKLQDQZ256rm },
7202 { X86::VUNPCKLPDZ256rr, X86::VUNPCKLPDZ256rr, X86::VPUNPCKLQDQZ256rr },
7203 { X86::VUNPCKHPDZ256rm, X86::VUNPCKHPDZ256rm, X86::VPUNPCKHQDQZ256rm },
7204 { X86::VUNPCKHPDZ256rr, X86::VUNPCKHPDZ256rr, X86::VPUNPCKHQDQZ256rr },
7205 { X86::VUNPCKLPSZ256rm, X86::VUNPCKLPSZ256rm, X86::VPUNPCKLDQZ256rm },
7206 { X86::VUNPCKLPSZ256rr, X86::VUNPCKLPSZ256rr, X86::VPUNPCKLDQZ256rr },
7207 { X86::VUNPCKHPSZ256rm, X86::VUNPCKHPSZ256rm, X86::VPUNPCKHDQZ256rm },
7208 { X86::VUNPCKHPSZ256rr, X86::VUNPCKHPSZ256rr, X86::VPUNPCKHDQZ256rr },
7209 { X86::VUNPCKLPDZ128rm, X86::VUNPCKLPDZ128rm, X86::VPUNPCKLQDQZ128rm },
7210 { X86::VMOVLHPSZrr, X86::VUNPCKLPDZ128rr, X86::VPUNPCKLQDQZ128rr },
7211 { X86::VUNPCKHPDZ128rm, X86::VUNPCKHPDZ128rm, X86::VPUNPCKHQDQZ128rm },
7212 { X86::VUNPCKHPDZ128rr, X86::VUNPCKHPDZ128rr, X86::VPUNPCKHQDQZ128rr },
7213 { X86::VUNPCKLPSZ128rm, X86::VUNPCKLPSZ128rm, X86::VPUNPCKLDQZ128rm },
7214 { X86::VUNPCKLPSZ128rr, X86::VUNPCKLPSZ128rr, X86::VPUNPCKLDQZ128rr },
7215 { X86::VUNPCKHPSZ128rm, X86::VUNPCKHPSZ128rm, X86::VPUNPCKHDQZ128rm },
7216 { X86::VUNPCKHPSZ128rr, X86::VUNPCKHPSZ128rr, X86::VPUNPCKHDQZ128rr },
7217 { X86::VUNPCKLPDZrm, X86::VUNPCKLPDZrm, X86::VPUNPCKLQDQZrm },
7218 { X86::VUNPCKLPDZrr, X86::VUNPCKLPDZrr, X86::VPUNPCKLQDQZrr },
7219 { X86::VUNPCKHPDZrm, X86::VUNPCKHPDZrm, X86::VPUNPCKHQDQZrm },
7220 { X86::VUNPCKHPDZrr, X86::VUNPCKHPDZrr, X86::VPUNPCKHQDQZrr },
7221 { X86::VUNPCKLPSZrm, X86::VUNPCKLPSZrm, X86::VPUNPCKLDQZrm },
7222 { X86::VUNPCKLPSZrr, X86::VUNPCKLPSZrr, X86::VPUNPCKLDQZrr },
7223 { X86::VUNPCKHPSZrm, X86::VUNPCKHPSZrm, X86::VPUNPCKHDQZrm },
7224 { X86::VUNPCKHPSZrr, X86::VUNPCKHPSZrr, X86::VPUNPCKHDQZrr },
7225 { X86::VEXTRACTPSZmr, X86::VEXTRACTPSZmr, X86::VPEXTRDZmr },
7226 { X86::VEXTRACTPSZrr, X86::VEXTRACTPSZrr, X86::VPEXTRDZrr },
7227 };
7228
7229 static const uint16_t ReplaceableInstrsAVX2[][3] = {
7230 //PackedSingle PackedDouble PackedInt
7231 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm },
7232 { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr },
7233 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm },
7234 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr },
7235 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm },
7236 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr },
7237 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm },
7238 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr },
7239 { X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm },
7240 { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr },
7241 { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm},
7242 { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr},
7243 { X86::VMOVDDUPrm, X86::VMOVDDUPrm, X86::VPBROADCASTQrm},
7244 { X86::VMOVDDUPrr, X86::VMOVDDUPrr, X86::VPBROADCASTQrr},
7245 { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr},
7246 { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm},
7247 { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr},
7248 { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm},
7249 { X86::VBROADCASTF128, X86::VBROADCASTF128, X86::VBROADCASTI128 },
7250 { X86::VBLENDPSYrri, X86::VBLENDPSYrri, X86::VPBLENDDYrri },
7251 { X86::VBLENDPSYrmi, X86::VBLENDPSYrmi, X86::VPBLENDDYrmi },
7252 { X86::VPERMILPSYmi, X86::VPERMILPSYmi, X86::VPSHUFDYmi },
7253 { X86::VPERMILPSYri, X86::VPERMILPSYri, X86::VPSHUFDYri },
7254 { X86::VUNPCKLPDYrm, X86::VUNPCKLPDYrm, X86::VPUNPCKLQDQYrm },
7255 { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrr, X86::VPUNPCKLQDQYrr },
7256 { X86::VUNPCKHPDYrm, X86::VUNPCKHPDYrm, X86::VPUNPCKHQDQYrm },
7257 { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrr, X86::VPUNPCKHQDQYrr },
7258 { X86::VUNPCKLPSYrm, X86::VUNPCKLPSYrm, X86::VPUNPCKLDQYrm },
7259 { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrr, X86::VPUNPCKLDQYrr },
7260 { X86::VUNPCKHPSYrm, X86::VUNPCKHPSYrm, X86::VPUNPCKHDQYrm },
7261 { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrr, X86::VPUNPCKHDQYrr },
7262 };
7263
7264 static const uint16_t ReplaceableInstrsFP[][3] = {
7265 //PackedSingle PackedDouble
7266 { X86::MOVLPSrm, X86::MOVLPDrm, X86::INSTRUCTION_LIST_END },
7267 { X86::MOVHPSrm, X86::MOVHPDrm, X86::INSTRUCTION_LIST_END },
7268 { X86::MOVHPSmr, X86::MOVHPDmr, X86::INSTRUCTION_LIST_END },
7269 { X86::VMOVLPSrm, X86::VMOVLPDrm, X86::INSTRUCTION_LIST_END },
7270 { X86::VMOVHPSrm, X86::VMOVHPDrm, X86::INSTRUCTION_LIST_END },
7271 { X86::VMOVHPSmr, X86::VMOVHPDmr, X86::INSTRUCTION_LIST_END },
7272 { X86::VMOVLPSZ128rm, X86::VMOVLPDZ128rm, X86::INSTRUCTION_LIST_END },
7273 { X86::VMOVHPSZ128rm, X86::VMOVHPDZ128rm, X86::INSTRUCTION_LIST_END },
7274 { X86::VMOVHPSZ128mr, X86::VMOVHPDZ128mr, X86::INSTRUCTION_LIST_END },
7275 };
7276
7277 static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = {
7278 //PackedSingle PackedDouble PackedInt
7279 { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
7280 { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
7281 { X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm },
7282 { X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr },
7283 };
7284
7285 static const uint16_t ReplaceableInstrsAVX512[][4] = {
7286 // Two integer columns for 64-bit and 32-bit elements.
7287 //PackedSingle PackedDouble PackedInt PackedInt
7288 { X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr },
7289 { X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm },
7290 { X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr },
7291 { X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr },
7292 { X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm },
7293 { X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr },
7294 { X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm },
7295 { X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr },
7296 { X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr },
7297 { X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm },
7298 { X86::VMOVAPSZmr, X86::VMOVAPDZmr, X86::VMOVDQA64Zmr, X86::VMOVDQA32Zmr },
7299 { X86::VMOVAPSZrm, X86::VMOVAPDZrm, X86::VMOVDQA64Zrm, X86::VMOVDQA32Zrm },
7300 { X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr },
7301 { X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr },
7302 { X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm },
7303 };
7304
7305 static const uint16_t ReplaceableInstrsAVX512DQ[][4] = {
7306 // Two integer columns for 64-bit and 32-bit elements.
7307 //PackedSingle PackedDouble PackedInt PackedInt
7308 { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
7309 { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
7310 { X86::VANDPSZ128rm, X86::VANDPDZ128rm, X86::VPANDQZ128rm, X86::VPANDDZ128rm },
7311 { X86::VANDPSZ128rr, X86::VANDPDZ128rr, X86::VPANDQZ128rr, X86::VPANDDZ128rr },
7312 { X86::VORPSZ128rm, X86::VORPDZ128rm, X86::VPORQZ128rm, X86::VPORDZ128rm },
7313 { X86::VORPSZ128rr, X86::VORPDZ128rr, X86::VPORQZ128rr, X86::VPORDZ128rr },
7314 { X86::VXORPSZ128rm, X86::VXORPDZ128rm, X86::VPXORQZ128rm, X86::VPXORDZ128rm },
7315 { X86::VXORPSZ128rr, X86::VXORPDZ128rr, X86::VPXORQZ128rr, X86::VPXORDZ128rr },
7316 { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
7317 { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
7318 { X86::VANDPSZ256rm, X86::VANDPDZ256rm, X86::VPANDQZ256rm, X86::VPANDDZ256rm },
7319 { X86::VANDPSZ256rr, X86::VANDPDZ256rr, X86::VPANDQZ256rr, X86::VPANDDZ256rr },
7320 { X86::VORPSZ256rm, X86::VORPDZ256rm, X86::VPORQZ256rm, X86::VPORDZ256rm },
7321 { X86::VORPSZ256rr, X86::VORPDZ256rr, X86::VPORQZ256rr, X86::VPORDZ256rr },
7322 { X86::VXORPSZ256rm, X86::VXORPDZ256rm, X86::VPXORQZ256rm, X86::VPXORDZ256rm },
7323 { X86::VXORPSZ256rr, X86::VXORPDZ256rr, X86::VPXORQZ256rr, X86::VPXORDZ256rr },
7324 { X86::VANDNPSZrm, X86::VANDNPDZrm, X86::VPANDNQZrm, X86::VPANDNDZrm },
7325 { X86::VANDNPSZrr, X86::VANDNPDZrr, X86::VPANDNQZrr, X86::VPANDNDZrr },
7326 { X86::VANDPSZrm, X86::VANDPDZrm, X86::VPANDQZrm, X86::VPANDDZrm },
7327 { X86::VANDPSZrr, X86::VANDPDZrr, X86::VPANDQZrr, X86::VPANDDZrr },
7328 { X86::VORPSZrm, X86::VORPDZrm, X86::VPORQZrm, X86::VPORDZrm },
7329 { X86::VORPSZrr, X86::VORPDZrr, X86::VPORQZrr, X86::VPORDZrr },
7330 { X86::VXORPSZrm, X86::VXORPDZrm, X86::VPXORQZrm, X86::VPXORDZrm },
7331 { X86::VXORPSZrr, X86::VXORPDZrr, X86::VPXORQZrr, X86::VPXORDZrr },
7332 };
7333
7334 static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = {
7335 // Two integer columns for 64-bit and 32-bit elements.
7336 //PackedSingle PackedDouble
7337 //PackedInt PackedInt
7338 { X86::VANDNPSZ128rmk, X86::VANDNPDZ128rmk,
7339 X86::VPANDNQZ128rmk, X86::VPANDNDZ128rmk },
7340 { X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz,
7341 X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz },
7342 { X86::VANDNPSZ128rrk, X86::VANDNPDZ128rrk,
7343 X86::VPANDNQZ128rrk, X86::VPANDNDZ128rrk },
7344 { X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz,
7345 X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz },
7346 { X86::VANDPSZ128rmk, X86::VANDPDZ128rmk,
7347 X86::VPANDQZ128rmk, X86::VPANDDZ128rmk },
7348 { X86::VANDPSZ128rmkz, X86::VANDPDZ128rmkz,
7349 X86::VPANDQZ128rmkz, X86::VPANDDZ128rmkz },
7350 { X86::VANDPSZ128rrk, X86::VANDPDZ128rrk,
7351 X86::VPANDQZ128rrk, X86::VPANDDZ128rrk },
7352 { X86::VANDPSZ128rrkz, X86::VANDPDZ128rrkz,
7353 X86::VPANDQZ128rrkz, X86::VPANDDZ128rrkz },
7354 { X86::VORPSZ128rmk, X86::VORPDZ128rmk,
7355 X86::VPORQZ128rmk, X86::VPORDZ128rmk },
7356 { X86::VORPSZ128rmkz, X86::VORPDZ128rmkz,
7357 X86::VPORQZ128rmkz, X86::VPORDZ128rmkz },
7358 { X86::VORPSZ128rrk, X86::VORPDZ128rrk,
7359 X86::VPORQZ128rrk, X86::VPORDZ128rrk },
7360 { X86::VORPSZ128rrkz, X86::VORPDZ128rrkz,
7361 X86::VPORQZ128rrkz, X86::VPORDZ128rrkz },
7362 { X86::VXORPSZ128rmk, X86::VXORPDZ128rmk,
7363 X86::VPXORQZ128rmk, X86::VPXORDZ128rmk },
7364 { X86::VXORPSZ128rmkz, X86::VXORPDZ128rmkz,
7365 X86::VPXORQZ128rmkz, X86::VPXORDZ128rmkz },
7366 { X86::VXORPSZ128rrk, X86::VXORPDZ128rrk,
7367 X86::VPXORQZ128rrk, X86::VPXORDZ128rrk },
7368 { X86::VXORPSZ128rrkz, X86::VXORPDZ128rrkz,
7369 X86::VPXORQZ128rrkz, X86::VPXORDZ128rrkz },
7370 { X86::VANDNPSZ256rmk, X86::VANDNPDZ256rmk,
7371 X86::VPANDNQZ256rmk, X86::VPANDNDZ256rmk },
7372 { X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz,
7373 X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz },
7374 { X86::VANDNPSZ256rrk, X86::VANDNPDZ256rrk,
7375 X86::VPANDNQZ256rrk, X86::VPANDNDZ256rrk },
7376 { X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz,
7377 X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz },
7378 { X86::VANDPSZ256rmk, X86::VANDPDZ256rmk,
7379 X86::VPANDQZ256rmk, X86::VPANDDZ256rmk },
7380 { X86::VANDPSZ256rmkz, X86::VANDPDZ256rmkz,
7381 X86::VPANDQZ256rmkz, X86::VPANDDZ256rmkz },
7382 { X86::VANDPSZ256rrk, X86::VANDPDZ256rrk,
7383 X86::VPANDQZ256rrk, X86::VPANDDZ256rrk },
7384 { X86::VANDPSZ256rrkz, X86::VANDPDZ256rrkz,
7385 X86::VPANDQZ256rrkz, X86::VPANDDZ256rrkz },
7386 { X86::VORPSZ256rmk, X86::VORPDZ256rmk,
7387 X86::VPORQZ256rmk, X86::VPORDZ256rmk },
7388 { X86::VORPSZ256rmkz, X86::VORPDZ256rmkz,
7389 X86::VPORQZ256rmkz, X86::VPORDZ256rmkz },
7390 { X86::VORPSZ256rrk, X86::VORPDZ256rrk,
7391 X86::VPORQZ256rrk, X86::VPORDZ256rrk },
7392 { X86::VORPSZ256rrkz, X86::VORPDZ256rrkz,
7393 X86::VPORQZ256rrkz, X86::VPORDZ256rrkz },
7394 { X86::VXORPSZ256rmk, X86::VXORPDZ256rmk,
7395 X86::VPXORQZ256rmk, X86::VPXORDZ256rmk },
7396 { X86::VXORPSZ256rmkz, X86::VXORPDZ256rmkz,
7397 X86::VPXORQZ256rmkz, X86::VPXORDZ256rmkz },
7398 { X86::VXORPSZ256rrk, X86::VXORPDZ256rrk,
7399 X86::VPXORQZ256rrk, X86::VPXORDZ256rrk },
7400 { X86::VXORPSZ256rrkz, X86::VXORPDZ256rrkz,
7401 X86::VPXORQZ256rrkz, X86::VPXORDZ256rrkz },
7402 { X86::VANDNPSZrmk, X86::VANDNPDZrmk,
7403 X86::VPANDNQZrmk, X86::VPANDNDZrmk },
7404 { X86::VANDNPSZrmkz, X86::VANDNPDZrmkz,
7405 X86::VPANDNQZrmkz, X86::VPANDNDZrmkz },
7406 { X86::VANDNPSZrrk, X86::VANDNPDZrrk,
7407 X86::VPANDNQZrrk, X86::VPANDNDZrrk },
7408 { X86::VANDNPSZrrkz, X86::VANDNPDZrrkz,
7409 X86::VPANDNQZrrkz, X86::VPANDNDZrrkz },
7410 { X86::VANDPSZrmk, X86::VANDPDZrmk,
7411 X86::VPANDQZrmk, X86::VPANDDZrmk },
7412 { X86::VANDPSZrmkz, X86::VANDPDZrmkz,
7413 X86::VPANDQZrmkz, X86::VPANDDZrmkz },
7414 { X86::VANDPSZrrk, X86::VANDPDZrrk,
7415 X86::VPANDQZrrk, X86::VPANDDZrrk },
7416 { X86::VANDPSZrrkz, X86::VANDPDZrrkz,
7417 X86::VPANDQZrrkz, X86::VPANDDZrrkz },
7418 { X86::VORPSZrmk, X86::VORPDZrmk,
7419 X86::VPORQZrmk, X86::VPORDZrmk },
7420 { X86::VORPSZrmkz, X86::VORPDZrmkz,
7421 X86::VPORQZrmkz, X86::VPORDZrmkz },
7422 { X86::VORPSZrrk, X86::VORPDZrrk,
7423 X86::VPORQZrrk, X86::VPORDZrrk },
7424 { X86::VORPSZrrkz, X86::VORPDZrrkz,
7425 X86::VPORQZrrkz, X86::VPORDZrrkz },
7426 { X86::VXORPSZrmk, X86::VXORPDZrmk,
7427 X86::VPXORQZrmk, X86::VPXORDZrmk },
7428 { X86::VXORPSZrmkz, X86::VXORPDZrmkz,
7429 X86::VPXORQZrmkz, X86::VPXORDZrmkz },
7430 { X86::VXORPSZrrk, X86::VXORPDZrrk,
7431 X86::VPXORQZrrk, X86::VPXORDZrrk },
7432 { X86::VXORPSZrrkz, X86::VXORPDZrrkz,
7433 X86::VPXORQZrrkz, X86::VPXORDZrrkz },
7434 // Broadcast loads can be handled the same as masked operations to avoid
7435 // changing element size.
7436 { X86::VANDNPSZ128rmb, X86::VANDNPDZ128rmb,
7437 X86::VPANDNQZ128rmb, X86::VPANDNDZ128rmb },
7438 { X86::VANDPSZ128rmb, X86::VANDPDZ128rmb,
7439 X86::VPANDQZ128rmb, X86::VPANDDZ128rmb },
7440 { X86::VORPSZ128rmb, X86::VORPDZ128rmb,
7441 X86::VPORQZ128rmb, X86::VPORDZ128rmb },
7442 { X86::VXORPSZ128rmb, X86::VXORPDZ128rmb,
7443 X86::VPXORQZ128rmb, X86::VPXORDZ128rmb },
7444 { X86::VANDNPSZ256rmb, X86::VANDNPDZ256rmb,
7445 X86::VPANDNQZ256rmb, X86::VPANDNDZ256rmb },
7446 { X86::VANDPSZ256rmb, X86::VANDPDZ256rmb,
7447 X86::VPANDQZ256rmb, X86::VPANDDZ256rmb },
7448 { X86::VORPSZ256rmb, X86::VORPDZ256rmb,
7449 X86::VPORQZ256rmb, X86::VPORDZ256rmb },
7450 { X86::VXORPSZ256rmb, X86::VXORPDZ256rmb,
7451 X86::VPXORQZ256rmb, X86::VPXORDZ256rmb },
7452 { X86::VANDNPSZrmb, X86::VANDNPDZrmb,
7453 X86::VPANDNQZrmb, X86::VPANDNDZrmb },
7454 { X86::VANDPSZrmb, X86::VANDPDZrmb,
7455 X86::VPANDQZrmb, X86::VPANDDZrmb },
7456 { X86::VANDPSZrmb, X86::VANDPDZrmb,
7457 X86::VPANDQZrmb, X86::VPANDDZrmb },
7458 { X86::VORPSZrmb, X86::VORPDZrmb,
7459 X86::VPORQZrmb, X86::VPORDZrmb },
7460 { X86::VXORPSZrmb, X86::VXORPDZrmb,
7461 X86::VPXORQZrmb, X86::VPXORDZrmb },
7462 { X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk,
7463 X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk },
7464 { X86::VANDPSZ128rmbk, X86::VANDPDZ128rmbk,
7465 X86::VPANDQZ128rmbk, X86::VPANDDZ128rmbk },
7466 { X86::VORPSZ128rmbk, X86::VORPDZ128rmbk,
7467 X86::VPORQZ128rmbk, X86::VPORDZ128rmbk },
7468 { X86::VXORPSZ128rmbk, X86::VXORPDZ128rmbk,
7469 X86::VPXORQZ128rmbk, X86::VPXORDZ128rmbk },
7470 { X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk,
7471 X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk },
7472 { X86::VANDPSZ256rmbk, X86::VANDPDZ256rmbk,
7473 X86::VPANDQZ256rmbk, X86::VPANDDZ256rmbk },
7474 { X86::VORPSZ256rmbk, X86::VORPDZ256rmbk,
7475 X86::VPORQZ256rmbk, X86::VPORDZ256rmbk },
7476 { X86::VXORPSZ256rmbk, X86::VXORPDZ256rmbk,
7477 X86::VPXORQZ256rmbk, X86::VPXORDZ256rmbk },
7478 { X86::VANDNPSZrmbk, X86::VANDNPDZrmbk,
7479 X86::VPANDNQZrmbk, X86::VPANDNDZrmbk },
7480 { X86::VANDPSZrmbk, X86::VANDPDZrmbk,
7481 X86::VPANDQZrmbk, X86::VPANDDZrmbk },
7482 { X86::VANDPSZrmbk, X86::VANDPDZrmbk,
7483 X86::VPANDQZrmbk, X86::VPANDDZrmbk },
7484 { X86::VORPSZrmbk, X86::VORPDZrmbk,
7485 X86::VPORQZrmbk, X86::VPORDZrmbk },
7486 { X86::VXORPSZrmbk, X86::VXORPDZrmbk,
7487 X86::VPXORQZrmbk, X86::VPXORDZrmbk },
7488 { X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz,
7489 X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz},
7490 { X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz,
7491 X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz },
7492 { X86::VORPSZ128rmbkz, X86::VORPDZ128rmbkz,
7493 X86::VPORQZ128rmbkz, X86::VPORDZ128rmbkz },
7494 { X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz,
7495 X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz },
7496 { X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz,
7497 X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz},
7498 { X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz,
7499 X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz },
7500 { X86::VORPSZ256rmbkz, X86::VORPDZ256rmbkz,
7501 X86::VPORQZ256rmbkz, X86::VPORDZ256rmbkz },
7502 { X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz,
7503 X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz },
7504 { X86::VANDNPSZrmbkz, X86::VANDNPDZrmbkz,
7505 X86::VPANDNQZrmbkz, X86::VPANDNDZrmbkz },
7506 { X86::VANDPSZrmbkz, X86::VANDPDZrmbkz,
7507 X86::VPANDQZrmbkz, X86::VPANDDZrmbkz },
7508 { X86::VANDPSZrmbkz, X86::VANDPDZrmbkz,
7509 X86::VPANDQZrmbkz, X86::VPANDDZrmbkz },
7510 { X86::VORPSZrmbkz, X86::VORPDZrmbkz,
7511 X86::VPORQZrmbkz, X86::VPORDZrmbkz },
7512 { X86::VXORPSZrmbkz, X86::VXORPDZrmbkz,
7513 X86::VPXORQZrmbkz, X86::VPXORDZrmbkz },
7514 };
7515
7516 // NOTE: These should only be used by the custom domain methods.
7517 static const uint16_t ReplaceableBlendInstrs[][3] = {
7518 //PackedSingle PackedDouble PackedInt
7519 { X86::BLENDPSrmi, X86::BLENDPDrmi, X86::PBLENDWrmi },
7520 { X86::BLENDPSrri, X86::BLENDPDrri, X86::PBLENDWrri },
7521 { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDWrmi },
7522 { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDWrri },
7523 { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDWYrmi },
7524 { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDWYrri },
7525 };
7526 static const uint16_t ReplaceableBlendAVX2Instrs[][3] = {
7527 //PackedSingle PackedDouble PackedInt
7528 { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDDrmi },
7529 { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDDrri },
7530 { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDDYrmi },
7531 { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDDYrri },
7532 };
7533
7534 // Special table for changing EVEX logic instructions to VEX.
7535 // TODO: Should we run EVEX->VEX earlier?
7536 static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = {
7537 // Two integer columns for 64-bit and 32-bit elements.
7538 //PackedSingle PackedDouble PackedInt PackedInt
7539 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
7540 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
7541 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDQZ128rm, X86::VPANDDZ128rm },
7542 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDQZ128rr, X86::VPANDDZ128rr },
7543 { X86::VORPSrm, X86::VORPDrm, X86::VPORQZ128rm, X86::VPORDZ128rm },
7544 { X86::VORPSrr, X86::VORPDrr, X86::VPORQZ128rr, X86::VPORDZ128rr },
7545 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORQZ128rm, X86::VPXORDZ128rm },
7546 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORQZ128rr, X86::VPXORDZ128rr },
7547 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
7548 { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
7549 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDQZ256rm, X86::VPANDDZ256rm },
7550 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDQZ256rr, X86::VPANDDZ256rr },
7551 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORQZ256rm, X86::VPORDZ256rm },
7552 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORQZ256rr, X86::VPORDZ256rr },
7553 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORQZ256rm, X86::VPXORDZ256rm },
7554 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORQZ256rr, X86::VPXORDZ256rr },
7555 };
7556
7557 // FIXME: Some shuffle and unpack instructions have equivalents in different
7558 // domains, but they require a bit more work than just switching opcodes.
7559
lookup(unsigned opcode,unsigned domain,ArrayRef<uint16_t[3]> Table)7560 static const uint16_t *lookup(unsigned opcode, unsigned domain,
7561 ArrayRef<uint16_t[3]> Table) {
7562 for (const uint16_t (&Row)[3] : Table)
7563 if (Row[domain-1] == opcode)
7564 return Row;
7565 return nullptr;
7566 }
7567
lookupAVX512(unsigned opcode,unsigned domain,ArrayRef<uint16_t[4]> Table)7568 static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
7569 ArrayRef<uint16_t[4]> Table) {
7570 // If this is the integer domain make sure to check both integer columns.
7571 for (const uint16_t (&Row)[4] : Table)
7572 if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode))
7573 return Row;
7574 return nullptr;
7575 }
7576
7577 // Helper to attempt to widen/narrow blend masks.
AdjustBlendMask(unsigned OldMask,unsigned OldWidth,unsigned NewWidth,unsigned * pNewMask=nullptr)7578 static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
7579 unsigned NewWidth, unsigned *pNewMask = nullptr) {
7580 assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
7581 "Illegal blend mask scale");
7582 unsigned NewMask = 0;
7583
7584 if ((OldWidth % NewWidth) == 0) {
7585 unsigned Scale = OldWidth / NewWidth;
7586 unsigned SubMask = (1u << Scale) - 1;
7587 for (unsigned i = 0; i != NewWidth; ++i) {
7588 unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
7589 if (Sub == SubMask)
7590 NewMask |= (1u << i);
7591 else if (Sub != 0x0)
7592 return false;
7593 }
7594 } else {
7595 unsigned Scale = NewWidth / OldWidth;
7596 unsigned SubMask = (1u << Scale) - 1;
7597 for (unsigned i = 0; i != OldWidth; ++i) {
7598 if (OldMask & (1 << i)) {
7599 NewMask |= (SubMask << (i * Scale));
7600 }
7601 }
7602 }
7603
7604 if (pNewMask)
7605 *pNewMask = NewMask;
7606 return true;
7607 }
7608
getExecutionDomainCustom(const MachineInstr & MI) const7609 uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {
7610 unsigned Opcode = MI.getOpcode();
7611 unsigned NumOperands = MI.getDesc().getNumOperands();
7612
7613 auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
7614 uint16_t validDomains = 0;
7615 if (MI.getOperand(NumOperands - 1).isImm()) {
7616 unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
7617 if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4))
7618 validDomains |= 0x2; // PackedSingle
7619 if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2))
7620 validDomains |= 0x4; // PackedDouble
7621 if (!Is256 || Subtarget.hasAVX2())
7622 validDomains |= 0x8; // PackedInt
7623 }
7624 return validDomains;
7625 };
7626
7627 switch (Opcode) {
7628 case X86::BLENDPDrmi:
7629 case X86::BLENDPDrri:
7630 case X86::VBLENDPDrmi:
7631 case X86::VBLENDPDrri:
7632 return GetBlendDomains(2, false);
7633 case X86::VBLENDPDYrmi:
7634 case X86::VBLENDPDYrri:
7635 return GetBlendDomains(4, true);
7636 case X86::BLENDPSrmi:
7637 case X86::BLENDPSrri:
7638 case X86::VBLENDPSrmi:
7639 case X86::VBLENDPSrri:
7640 case X86::VPBLENDDrmi:
7641 case X86::VPBLENDDrri:
7642 return GetBlendDomains(4, false);
7643 case X86::VBLENDPSYrmi:
7644 case X86::VBLENDPSYrri:
7645 case X86::VPBLENDDYrmi:
7646 case X86::VPBLENDDYrri:
7647 return GetBlendDomains(8, true);
7648 case X86::PBLENDWrmi:
7649 case X86::PBLENDWrri:
7650 case X86::VPBLENDWrmi:
7651 case X86::VPBLENDWrri:
7652 // Treat VPBLENDWY as a 128-bit vector as it repeats the lo/hi masks.
7653 case X86::VPBLENDWYrmi:
7654 case X86::VPBLENDWYrri:
7655 return GetBlendDomains(8, false);
7656 case X86::VPANDDZ128rr: case X86::VPANDDZ128rm:
7657 case X86::VPANDDZ256rr: case X86::VPANDDZ256rm:
7658 case X86::VPANDQZ128rr: case X86::VPANDQZ128rm:
7659 case X86::VPANDQZ256rr: case X86::VPANDQZ256rm:
7660 case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
7661 case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
7662 case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
7663 case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
7664 case X86::VPORDZ128rr: case X86::VPORDZ128rm:
7665 case X86::VPORDZ256rr: case X86::VPORDZ256rm:
7666 case X86::VPORQZ128rr: case X86::VPORQZ128rm:
7667 case X86::VPORQZ256rr: case X86::VPORQZ256rm:
7668 case X86::VPXORDZ128rr: case X86::VPXORDZ128rm:
7669 case X86::VPXORDZ256rr: case X86::VPXORDZ256rm:
7670 case X86::VPXORQZ128rr: case X86::VPXORQZ128rm:
7671 case X86::VPXORQZ256rr: case X86::VPXORQZ256rm:
7672 // If we don't have DQI see if we can still switch from an EVEX integer
7673 // instruction to a VEX floating point instruction.
7674 if (Subtarget.hasDQI())
7675 return 0;
7676
7677 if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
7678 return 0;
7679 if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
7680 return 0;
7681 // Register forms will have 3 operands. Memory form will have more.
7682 if (NumOperands == 3 &&
7683 RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
7684 return 0;
7685
7686 // All domains are valid.
7687 return 0xe;
7688 case X86::MOVHLPSrr:
7689 // We can swap domains when both inputs are the same register.
7690 // FIXME: This doesn't catch all the cases we would like. If the input
7691 // register isn't KILLed by the instruction, the two address instruction
7692 // pass puts a COPY on one input. The other input uses the original
7693 // register. This prevents the same physical register from being used by
7694 // both inputs.
7695 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
7696 MI.getOperand(0).getSubReg() == 0 &&
7697 MI.getOperand(1).getSubReg() == 0 &&
7698 MI.getOperand(2).getSubReg() == 0)
7699 return 0x6;
7700 return 0;
7701 case X86::SHUFPDrri:
7702 return 0x6;
7703 }
7704 return 0;
7705 }
7706
setExecutionDomainCustom(MachineInstr & MI,unsigned Domain) const7707 bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
7708 unsigned Domain) const {
7709 assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
7710 uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
7711 assert(dom && "Not an SSE instruction");
7712
7713 unsigned Opcode = MI.getOpcode();
7714 unsigned NumOperands = MI.getDesc().getNumOperands();
7715
7716 auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
7717 if (MI.getOperand(NumOperands - 1).isImm()) {
7718 unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
7719 Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
7720 unsigned NewImm = Imm;
7721
7722 const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
7723 if (!table)
7724 table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
7725
7726 if (Domain == 1) { // PackedSingle
7727 AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
7728 } else if (Domain == 2) { // PackedDouble
7729 AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm);
7730 } else if (Domain == 3) { // PackedInt
7731 if (Subtarget.hasAVX2()) {
7732 // If we are already VPBLENDW use that, else use VPBLENDD.
7733 if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
7734 table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
7735 AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
7736 }
7737 } else {
7738 assert(!Is256 && "128-bit vector expected");
7739 AdjustBlendMask(Imm, ImmWidth, 8, &NewImm);
7740 }
7741 }
7742
7743 assert(table && table[Domain - 1] && "Unknown domain op");
7744 MI.setDesc(get(table[Domain - 1]));
7745 MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
7746 }
7747 return true;
7748 };
7749
7750 switch (Opcode) {
7751 case X86::BLENDPDrmi:
7752 case X86::BLENDPDrri:
7753 case X86::VBLENDPDrmi:
7754 case X86::VBLENDPDrri:
7755 return SetBlendDomain(2, false);
7756 case X86::VBLENDPDYrmi:
7757 case X86::VBLENDPDYrri:
7758 return SetBlendDomain(4, true);
7759 case X86::BLENDPSrmi:
7760 case X86::BLENDPSrri:
7761 case X86::VBLENDPSrmi:
7762 case X86::VBLENDPSrri:
7763 case X86::VPBLENDDrmi:
7764 case X86::VPBLENDDrri:
7765 return SetBlendDomain(4, false);
7766 case X86::VBLENDPSYrmi:
7767 case X86::VBLENDPSYrri:
7768 case X86::VPBLENDDYrmi:
7769 case X86::VPBLENDDYrri:
7770 return SetBlendDomain(8, true);
7771 case X86::PBLENDWrmi:
7772 case X86::PBLENDWrri:
7773 case X86::VPBLENDWrmi:
7774 case X86::VPBLENDWrri:
7775 return SetBlendDomain(8, false);
7776 case X86::VPBLENDWYrmi:
7777 case X86::VPBLENDWYrri:
7778 return SetBlendDomain(16, true);
7779 case X86::VPANDDZ128rr: case X86::VPANDDZ128rm:
7780 case X86::VPANDDZ256rr: case X86::VPANDDZ256rm:
7781 case X86::VPANDQZ128rr: case X86::VPANDQZ128rm:
7782 case X86::VPANDQZ256rr: case X86::VPANDQZ256rm:
7783 case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
7784 case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
7785 case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
7786 case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
7787 case X86::VPORDZ128rr: case X86::VPORDZ128rm:
7788 case X86::VPORDZ256rr: case X86::VPORDZ256rm:
7789 case X86::VPORQZ128rr: case X86::VPORQZ128rm:
7790 case X86::VPORQZ256rr: case X86::VPORQZ256rm:
7791 case X86::VPXORDZ128rr: case X86::VPXORDZ128rm:
7792 case X86::VPXORDZ256rr: case X86::VPXORDZ256rm:
7793 case X86::VPXORQZ128rr: case X86::VPXORQZ128rm:
7794 case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: {
7795 // Without DQI, convert EVEX instructions to VEX instructions.
7796 if (Subtarget.hasDQI())
7797 return false;
7798
7799 const uint16_t *table = lookupAVX512(MI.getOpcode(), dom,
7800 ReplaceableCustomAVX512LogicInstrs);
7801 assert(table && "Instruction not found in table?");
7802 // Don't change integer Q instructions to D instructions and
7803 // use D intructions if we started with a PS instruction.
7804 if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
7805 Domain = 4;
7806 MI.setDesc(get(table[Domain - 1]));
7807 return true;
7808 }
7809 case X86::UNPCKHPDrr:
7810 case X86::MOVHLPSrr:
7811 // We just need to commute the instruction which will switch the domains.
7812 if (Domain != dom && Domain != 3 &&
7813 MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
7814 MI.getOperand(0).getSubReg() == 0 &&
7815 MI.getOperand(1).getSubReg() == 0 &&
7816 MI.getOperand(2).getSubReg() == 0) {
7817 commuteInstruction(MI, false);
7818 return true;
7819 }
7820 // We must always return true for MOVHLPSrr.
7821 if (Opcode == X86::MOVHLPSrr)
7822 return true;
7823 break;
7824 case X86::SHUFPDrri: {
7825 if (Domain == 1) {
7826 unsigned Imm = MI.getOperand(3).getImm();
7827 unsigned NewImm = 0x44;
7828 if (Imm & 1) NewImm |= 0x0a;
7829 if (Imm & 2) NewImm |= 0xa0;
7830 MI.getOperand(3).setImm(NewImm);
7831 MI.setDesc(get(X86::SHUFPSrri));
7832 }
7833 return true;
7834 }
7835 }
7836 return false;
7837 }
7838
7839 std::pair<uint16_t, uint16_t>
getExecutionDomain(const MachineInstr & MI) const7840 X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
7841 uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
7842 unsigned opcode = MI.getOpcode();
7843 uint16_t validDomains = 0;
7844 if (domain) {
7845 // Attempt to match for custom instructions.
7846 validDomains = getExecutionDomainCustom(MI);
7847 if (validDomains)
7848 return std::make_pair(domain, validDomains);
7849
7850 if (lookup(opcode, domain, ReplaceableInstrs)) {
7851 validDomains = 0xe;
7852 } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
7853 validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
7854 } else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
7855 validDomains = 0x6;
7856 } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
7857 // Insert/extract instructions should only effect domain if AVX2
7858 // is enabled.
7859 if (!Subtarget.hasAVX2())
7860 return std::make_pair(0, 0);
7861 validDomains = 0xe;
7862 } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
7863 validDomains = 0xe;
7864 } else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain,
7865 ReplaceableInstrsAVX512DQ)) {
7866 validDomains = 0xe;
7867 } else if (Subtarget.hasDQI()) {
7868 if (const uint16_t *table = lookupAVX512(opcode, domain,
7869 ReplaceableInstrsAVX512DQMasked)) {
7870 if (domain == 1 || (domain == 3 && table[3] == opcode))
7871 validDomains = 0xa;
7872 else
7873 validDomains = 0xc;
7874 }
7875 }
7876 }
7877 return std::make_pair(domain, validDomains);
7878 }
7879
setExecutionDomain(MachineInstr & MI,unsigned Domain) const7880 void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
7881 assert(Domain>0 && Domain<4 && "Invalid execution domain");
7882 uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
7883 assert(dom && "Not an SSE instruction");
7884
7885 // Attempt to match for custom instructions.
7886 if (setExecutionDomainCustom(MI, Domain))
7887 return;
7888
7889 const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
7890 if (!table) { // try the other table
7891 assert((Subtarget.hasAVX2() || Domain < 3) &&
7892 "256-bit vector operations only available in AVX2");
7893 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
7894 }
7895 if (!table) { // try the FP table
7896 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
7897 assert((!table || Domain < 3) &&
7898 "Can only select PackedSingle or PackedDouble");
7899 }
7900 if (!table) { // try the other table
7901 assert(Subtarget.hasAVX2() &&
7902 "256-bit insert/extract only available in AVX2");
7903 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
7904 }
7905 if (!table) { // try the AVX512 table
7906 assert(Subtarget.hasAVX512() && "Requires AVX-512");
7907 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
7908 // Don't change integer Q instructions to D instructions.
7909 if (table && Domain == 3 && table[3] == MI.getOpcode())
7910 Domain = 4;
7911 }
7912 if (!table) { // try the AVX512DQ table
7913 assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
7914 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
7915 // Don't change integer Q instructions to D instructions and
7916 // use D instructions if we started with a PS instruction.
7917 if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
7918 Domain = 4;
7919 }
7920 if (!table) { // try the AVX512DQMasked table
7921 assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
7922 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
7923 if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
7924 Domain = 4;
7925 }
7926 assert(table && "Cannot change domain");
7927 MI.setDesc(get(table[Domain - 1]));
7928 }
7929
7930 /// Return the noop instruction to use for a noop.
getNop() const7931 MCInst X86InstrInfo::getNop() const {
7932 MCInst Nop;
7933 Nop.setOpcode(X86::NOOP);
7934 return Nop;
7935 }
7936
isHighLatencyDef(int opc) const7937 bool X86InstrInfo::isHighLatencyDef(int opc) const {
7938 switch (opc) {
7939 default: return false;
7940 case X86::DIVPDrm:
7941 case X86::DIVPDrr:
7942 case X86::DIVPSrm:
7943 case X86::DIVPSrr:
7944 case X86::DIVSDrm:
7945 case X86::DIVSDrm_Int:
7946 case X86::DIVSDrr:
7947 case X86::DIVSDrr_Int:
7948 case X86::DIVSSrm:
7949 case X86::DIVSSrm_Int:
7950 case X86::DIVSSrr:
7951 case X86::DIVSSrr_Int:
7952 case X86::SQRTPDm:
7953 case X86::SQRTPDr:
7954 case X86::SQRTPSm:
7955 case X86::SQRTPSr:
7956 case X86::SQRTSDm:
7957 case X86::SQRTSDm_Int:
7958 case X86::SQRTSDr:
7959 case X86::SQRTSDr_Int:
7960 case X86::SQRTSSm:
7961 case X86::SQRTSSm_Int:
7962 case X86::SQRTSSr:
7963 case X86::SQRTSSr_Int:
7964 // AVX instructions with high latency
7965 case X86::VDIVPDrm:
7966 case X86::VDIVPDrr:
7967 case X86::VDIVPDYrm:
7968 case X86::VDIVPDYrr:
7969 case X86::VDIVPSrm:
7970 case X86::VDIVPSrr:
7971 case X86::VDIVPSYrm:
7972 case X86::VDIVPSYrr:
7973 case X86::VDIVSDrm:
7974 case X86::VDIVSDrm_Int:
7975 case X86::VDIVSDrr:
7976 case X86::VDIVSDrr_Int:
7977 case X86::VDIVSSrm:
7978 case X86::VDIVSSrm_Int:
7979 case X86::VDIVSSrr:
7980 case X86::VDIVSSrr_Int:
7981 case X86::VSQRTPDm:
7982 case X86::VSQRTPDr:
7983 case X86::VSQRTPDYm:
7984 case X86::VSQRTPDYr:
7985 case X86::VSQRTPSm:
7986 case X86::VSQRTPSr:
7987 case X86::VSQRTPSYm:
7988 case X86::VSQRTPSYr:
7989 case X86::VSQRTSDm:
7990 case X86::VSQRTSDm_Int:
7991 case X86::VSQRTSDr:
7992 case X86::VSQRTSDr_Int:
7993 case X86::VSQRTSSm:
7994 case X86::VSQRTSSm_Int:
7995 case X86::VSQRTSSr:
7996 case X86::VSQRTSSr_Int:
7997 // AVX512 instructions with high latency
7998 case X86::VDIVPDZ128rm:
7999 case X86::VDIVPDZ128rmb:
8000 case X86::VDIVPDZ128rmbk:
8001 case X86::VDIVPDZ128rmbkz:
8002 case X86::VDIVPDZ128rmk:
8003 case X86::VDIVPDZ128rmkz:
8004 case X86::VDIVPDZ128rr:
8005 case X86::VDIVPDZ128rrk:
8006 case X86::VDIVPDZ128rrkz:
8007 case X86::VDIVPDZ256rm:
8008 case X86::VDIVPDZ256rmb:
8009 case X86::VDIVPDZ256rmbk:
8010 case X86::VDIVPDZ256rmbkz:
8011 case X86::VDIVPDZ256rmk:
8012 case X86::VDIVPDZ256rmkz:
8013 case X86::VDIVPDZ256rr:
8014 case X86::VDIVPDZ256rrk:
8015 case X86::VDIVPDZ256rrkz:
8016 case X86::VDIVPDZrrb:
8017 case X86::VDIVPDZrrbk:
8018 case X86::VDIVPDZrrbkz:
8019 case X86::VDIVPDZrm:
8020 case X86::VDIVPDZrmb:
8021 case X86::VDIVPDZrmbk:
8022 case X86::VDIVPDZrmbkz:
8023 case X86::VDIVPDZrmk:
8024 case X86::VDIVPDZrmkz:
8025 case X86::VDIVPDZrr:
8026 case X86::VDIVPDZrrk:
8027 case X86::VDIVPDZrrkz:
8028 case X86::VDIVPSZ128rm:
8029 case X86::VDIVPSZ128rmb:
8030 case X86::VDIVPSZ128rmbk:
8031 case X86::VDIVPSZ128rmbkz:
8032 case X86::VDIVPSZ128rmk:
8033 case X86::VDIVPSZ128rmkz:
8034 case X86::VDIVPSZ128rr:
8035 case X86::VDIVPSZ128rrk:
8036 case X86::VDIVPSZ128rrkz:
8037 case X86::VDIVPSZ256rm:
8038 case X86::VDIVPSZ256rmb:
8039 case X86::VDIVPSZ256rmbk:
8040 case X86::VDIVPSZ256rmbkz:
8041 case X86::VDIVPSZ256rmk:
8042 case X86::VDIVPSZ256rmkz:
8043 case X86::VDIVPSZ256rr:
8044 case X86::VDIVPSZ256rrk:
8045 case X86::VDIVPSZ256rrkz:
8046 case X86::VDIVPSZrrb:
8047 case X86::VDIVPSZrrbk:
8048 case X86::VDIVPSZrrbkz:
8049 case X86::VDIVPSZrm:
8050 case X86::VDIVPSZrmb:
8051 case X86::VDIVPSZrmbk:
8052 case X86::VDIVPSZrmbkz:
8053 case X86::VDIVPSZrmk:
8054 case X86::VDIVPSZrmkz:
8055 case X86::VDIVPSZrr:
8056 case X86::VDIVPSZrrk:
8057 case X86::VDIVPSZrrkz:
8058 case X86::VDIVSDZrm:
8059 case X86::VDIVSDZrr:
8060 case X86::VDIVSDZrm_Int:
8061 case X86::VDIVSDZrm_Intk:
8062 case X86::VDIVSDZrm_Intkz:
8063 case X86::VDIVSDZrr_Int:
8064 case X86::VDIVSDZrr_Intk:
8065 case X86::VDIVSDZrr_Intkz:
8066 case X86::VDIVSDZrrb_Int:
8067 case X86::VDIVSDZrrb_Intk:
8068 case X86::VDIVSDZrrb_Intkz:
8069 case X86::VDIVSSZrm:
8070 case X86::VDIVSSZrr:
8071 case X86::VDIVSSZrm_Int:
8072 case X86::VDIVSSZrm_Intk:
8073 case X86::VDIVSSZrm_Intkz:
8074 case X86::VDIVSSZrr_Int:
8075 case X86::VDIVSSZrr_Intk:
8076 case X86::VDIVSSZrr_Intkz:
8077 case X86::VDIVSSZrrb_Int:
8078 case X86::VDIVSSZrrb_Intk:
8079 case X86::VDIVSSZrrb_Intkz:
8080 case X86::VSQRTPDZ128m:
8081 case X86::VSQRTPDZ128mb:
8082 case X86::VSQRTPDZ128mbk:
8083 case X86::VSQRTPDZ128mbkz:
8084 case X86::VSQRTPDZ128mk:
8085 case X86::VSQRTPDZ128mkz:
8086 case X86::VSQRTPDZ128r:
8087 case X86::VSQRTPDZ128rk:
8088 case X86::VSQRTPDZ128rkz:
8089 case X86::VSQRTPDZ256m:
8090 case X86::VSQRTPDZ256mb:
8091 case X86::VSQRTPDZ256mbk:
8092 case X86::VSQRTPDZ256mbkz:
8093 case X86::VSQRTPDZ256mk:
8094 case X86::VSQRTPDZ256mkz:
8095 case X86::VSQRTPDZ256r:
8096 case X86::VSQRTPDZ256rk:
8097 case X86::VSQRTPDZ256rkz:
8098 case X86::VSQRTPDZm:
8099 case X86::VSQRTPDZmb:
8100 case X86::VSQRTPDZmbk:
8101 case X86::VSQRTPDZmbkz:
8102 case X86::VSQRTPDZmk:
8103 case X86::VSQRTPDZmkz:
8104 case X86::VSQRTPDZr:
8105 case X86::VSQRTPDZrb:
8106 case X86::VSQRTPDZrbk:
8107 case X86::VSQRTPDZrbkz:
8108 case X86::VSQRTPDZrk:
8109 case X86::VSQRTPDZrkz:
8110 case X86::VSQRTPSZ128m:
8111 case X86::VSQRTPSZ128mb:
8112 case X86::VSQRTPSZ128mbk:
8113 case X86::VSQRTPSZ128mbkz:
8114 case X86::VSQRTPSZ128mk:
8115 case X86::VSQRTPSZ128mkz:
8116 case X86::VSQRTPSZ128r:
8117 case X86::VSQRTPSZ128rk:
8118 case X86::VSQRTPSZ128rkz:
8119 case X86::VSQRTPSZ256m:
8120 case X86::VSQRTPSZ256mb:
8121 case X86::VSQRTPSZ256mbk:
8122 case X86::VSQRTPSZ256mbkz:
8123 case X86::VSQRTPSZ256mk:
8124 case X86::VSQRTPSZ256mkz:
8125 case X86::VSQRTPSZ256r:
8126 case X86::VSQRTPSZ256rk:
8127 case X86::VSQRTPSZ256rkz:
8128 case X86::VSQRTPSZm:
8129 case X86::VSQRTPSZmb:
8130 case X86::VSQRTPSZmbk:
8131 case X86::VSQRTPSZmbkz:
8132 case X86::VSQRTPSZmk:
8133 case X86::VSQRTPSZmkz:
8134 case X86::VSQRTPSZr:
8135 case X86::VSQRTPSZrb:
8136 case X86::VSQRTPSZrbk:
8137 case X86::VSQRTPSZrbkz:
8138 case X86::VSQRTPSZrk:
8139 case X86::VSQRTPSZrkz:
8140 case X86::VSQRTSDZm:
8141 case X86::VSQRTSDZm_Int:
8142 case X86::VSQRTSDZm_Intk:
8143 case X86::VSQRTSDZm_Intkz:
8144 case X86::VSQRTSDZr:
8145 case X86::VSQRTSDZr_Int:
8146 case X86::VSQRTSDZr_Intk:
8147 case X86::VSQRTSDZr_Intkz:
8148 case X86::VSQRTSDZrb_Int:
8149 case X86::VSQRTSDZrb_Intk:
8150 case X86::VSQRTSDZrb_Intkz:
8151 case X86::VSQRTSSZm:
8152 case X86::VSQRTSSZm_Int:
8153 case X86::VSQRTSSZm_Intk:
8154 case X86::VSQRTSSZm_Intkz:
8155 case X86::VSQRTSSZr:
8156 case X86::VSQRTSSZr_Int:
8157 case X86::VSQRTSSZr_Intk:
8158 case X86::VSQRTSSZr_Intkz:
8159 case X86::VSQRTSSZrb_Int:
8160 case X86::VSQRTSSZrb_Intk:
8161 case X86::VSQRTSSZrb_Intkz:
8162
8163 case X86::VGATHERDPDYrm:
8164 case X86::VGATHERDPDZ128rm:
8165 case X86::VGATHERDPDZ256rm:
8166 case X86::VGATHERDPDZrm:
8167 case X86::VGATHERDPDrm:
8168 case X86::VGATHERDPSYrm:
8169 case X86::VGATHERDPSZ128rm:
8170 case X86::VGATHERDPSZ256rm:
8171 case X86::VGATHERDPSZrm:
8172 case X86::VGATHERDPSrm:
8173 case X86::VGATHERPF0DPDm:
8174 case X86::VGATHERPF0DPSm:
8175 case X86::VGATHERPF0QPDm:
8176 case X86::VGATHERPF0QPSm:
8177 case X86::VGATHERPF1DPDm:
8178 case X86::VGATHERPF1DPSm:
8179 case X86::VGATHERPF1QPDm:
8180 case X86::VGATHERPF1QPSm:
8181 case X86::VGATHERQPDYrm:
8182 case X86::VGATHERQPDZ128rm:
8183 case X86::VGATHERQPDZ256rm:
8184 case X86::VGATHERQPDZrm:
8185 case X86::VGATHERQPDrm:
8186 case X86::VGATHERQPSYrm:
8187 case X86::VGATHERQPSZ128rm:
8188 case X86::VGATHERQPSZ256rm:
8189 case X86::VGATHERQPSZrm:
8190 case X86::VGATHERQPSrm:
8191 case X86::VPGATHERDDYrm:
8192 case X86::VPGATHERDDZ128rm:
8193 case X86::VPGATHERDDZ256rm:
8194 case X86::VPGATHERDDZrm:
8195 case X86::VPGATHERDDrm:
8196 case X86::VPGATHERDQYrm:
8197 case X86::VPGATHERDQZ128rm:
8198 case X86::VPGATHERDQZ256rm:
8199 case X86::VPGATHERDQZrm:
8200 case X86::VPGATHERDQrm:
8201 case X86::VPGATHERQDYrm:
8202 case X86::VPGATHERQDZ128rm:
8203 case X86::VPGATHERQDZ256rm:
8204 case X86::VPGATHERQDZrm:
8205 case X86::VPGATHERQDrm:
8206 case X86::VPGATHERQQYrm:
8207 case X86::VPGATHERQQZ128rm:
8208 case X86::VPGATHERQQZ256rm:
8209 case X86::VPGATHERQQZrm:
8210 case X86::VPGATHERQQrm:
8211 case X86::VSCATTERDPDZ128mr:
8212 case X86::VSCATTERDPDZ256mr:
8213 case X86::VSCATTERDPDZmr:
8214 case X86::VSCATTERDPSZ128mr:
8215 case X86::VSCATTERDPSZ256mr:
8216 case X86::VSCATTERDPSZmr:
8217 case X86::VSCATTERPF0DPDm:
8218 case X86::VSCATTERPF0DPSm:
8219 case X86::VSCATTERPF0QPDm:
8220 case X86::VSCATTERPF0QPSm:
8221 case X86::VSCATTERPF1DPDm:
8222 case X86::VSCATTERPF1DPSm:
8223 case X86::VSCATTERPF1QPDm:
8224 case X86::VSCATTERPF1QPSm:
8225 case X86::VSCATTERQPDZ128mr:
8226 case X86::VSCATTERQPDZ256mr:
8227 case X86::VSCATTERQPDZmr:
8228 case X86::VSCATTERQPSZ128mr:
8229 case X86::VSCATTERQPSZ256mr:
8230 case X86::VSCATTERQPSZmr:
8231 case X86::VPSCATTERDDZ128mr:
8232 case X86::VPSCATTERDDZ256mr:
8233 case X86::VPSCATTERDDZmr:
8234 case X86::VPSCATTERDQZ128mr:
8235 case X86::VPSCATTERDQZ256mr:
8236 case X86::VPSCATTERDQZmr:
8237 case X86::VPSCATTERQDZ128mr:
8238 case X86::VPSCATTERQDZ256mr:
8239 case X86::VPSCATTERQDZmr:
8240 case X86::VPSCATTERQQZ128mr:
8241 case X86::VPSCATTERQQZ256mr:
8242 case X86::VPSCATTERQQZmr:
8243 return true;
8244 }
8245 }
8246
hasHighOperandLatency(const TargetSchedModel & SchedModel,const MachineRegisterInfo * MRI,const MachineInstr & DefMI,unsigned DefIdx,const MachineInstr & UseMI,unsigned UseIdx) const8247 bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
8248 const MachineRegisterInfo *MRI,
8249 const MachineInstr &DefMI,
8250 unsigned DefIdx,
8251 const MachineInstr &UseMI,
8252 unsigned UseIdx) const {
8253 return isHighLatencyDef(DefMI.getOpcode());
8254 }
8255
hasReassociableOperands(const MachineInstr & Inst,const MachineBasicBlock * MBB) const8256 bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
8257 const MachineBasicBlock *MBB) const {
8258 assert(Inst.getNumExplicitOperands() == 3 && Inst.getNumExplicitDefs() == 1 &&
8259 Inst.getNumDefs() <= 2 && "Reassociation needs binary operators");
8260
8261 // Integer binary math/logic instructions have a third source operand:
8262 // the EFLAGS register. That operand must be both defined here and never
8263 // used; ie, it must be dead. If the EFLAGS operand is live, then we can
8264 // not change anything because rearranging the operands could affect other
8265 // instructions that depend on the exact status flags (zero, sign, etc.)
8266 // that are set by using these particular operands with this operation.
8267 const MachineOperand *FlagDef = Inst.findRegisterDefOperand(X86::EFLAGS);
8268 assert((Inst.getNumDefs() == 1 || FlagDef) &&
8269 "Implicit def isn't flags?");
8270 if (FlagDef && !FlagDef->isDead())
8271 return false;
8272
8273 return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
8274 }
8275
8276 // TODO: There are many more machine instruction opcodes to match:
8277 // 1. Other data types (integer, vectors)
8278 // 2. Other math / logic operations (xor, or)
8279 // 3. Other forms of the same operation (intrinsics and other variants)
isAssociativeAndCommutative(const MachineInstr & Inst) const8280 bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
8281 switch (Inst.getOpcode()) {
8282 case X86::AND8rr:
8283 case X86::AND16rr:
8284 case X86::AND32rr:
8285 case X86::AND64rr:
8286 case X86::OR8rr:
8287 case X86::OR16rr:
8288 case X86::OR32rr:
8289 case X86::OR64rr:
8290 case X86::XOR8rr:
8291 case X86::XOR16rr:
8292 case X86::XOR32rr:
8293 case X86::XOR64rr:
8294 case X86::IMUL16rr:
8295 case X86::IMUL32rr:
8296 case X86::IMUL64rr:
8297 case X86::PANDrr:
8298 case X86::PORrr:
8299 case X86::PXORrr:
8300 case X86::ANDPDrr:
8301 case X86::ANDPSrr:
8302 case X86::ORPDrr:
8303 case X86::ORPSrr:
8304 case X86::XORPDrr:
8305 case X86::XORPSrr:
8306 case X86::PADDBrr:
8307 case X86::PADDWrr:
8308 case X86::PADDDrr:
8309 case X86::PADDQrr:
8310 case X86::PMULLWrr:
8311 case X86::PMULLDrr:
8312 case X86::PMAXSBrr:
8313 case X86::PMAXSDrr:
8314 case X86::PMAXSWrr:
8315 case X86::PMAXUBrr:
8316 case X86::PMAXUDrr:
8317 case X86::PMAXUWrr:
8318 case X86::PMINSBrr:
8319 case X86::PMINSDrr:
8320 case X86::PMINSWrr:
8321 case X86::PMINUBrr:
8322 case X86::PMINUDrr:
8323 case X86::PMINUWrr:
8324 case X86::VPANDrr:
8325 case X86::VPANDYrr:
8326 case X86::VPANDDZ128rr:
8327 case X86::VPANDDZ256rr:
8328 case X86::VPANDDZrr:
8329 case X86::VPANDQZ128rr:
8330 case X86::VPANDQZ256rr:
8331 case X86::VPANDQZrr:
8332 case X86::VPORrr:
8333 case X86::VPORYrr:
8334 case X86::VPORDZ128rr:
8335 case X86::VPORDZ256rr:
8336 case X86::VPORDZrr:
8337 case X86::VPORQZ128rr:
8338 case X86::VPORQZ256rr:
8339 case X86::VPORQZrr:
8340 case X86::VPXORrr:
8341 case X86::VPXORYrr:
8342 case X86::VPXORDZ128rr:
8343 case X86::VPXORDZ256rr:
8344 case X86::VPXORDZrr:
8345 case X86::VPXORQZ128rr:
8346 case X86::VPXORQZ256rr:
8347 case X86::VPXORQZrr:
8348 case X86::VANDPDrr:
8349 case X86::VANDPSrr:
8350 case X86::VANDPDYrr:
8351 case X86::VANDPSYrr:
8352 case X86::VANDPDZ128rr:
8353 case X86::VANDPSZ128rr:
8354 case X86::VANDPDZ256rr:
8355 case X86::VANDPSZ256rr:
8356 case X86::VANDPDZrr:
8357 case X86::VANDPSZrr:
8358 case X86::VORPDrr:
8359 case X86::VORPSrr:
8360 case X86::VORPDYrr:
8361 case X86::VORPSYrr:
8362 case X86::VORPDZ128rr:
8363 case X86::VORPSZ128rr:
8364 case X86::VORPDZ256rr:
8365 case X86::VORPSZ256rr:
8366 case X86::VORPDZrr:
8367 case X86::VORPSZrr:
8368 case X86::VXORPDrr:
8369 case X86::VXORPSrr:
8370 case X86::VXORPDYrr:
8371 case X86::VXORPSYrr:
8372 case X86::VXORPDZ128rr:
8373 case X86::VXORPSZ128rr:
8374 case X86::VXORPDZ256rr:
8375 case X86::VXORPSZ256rr:
8376 case X86::VXORPDZrr:
8377 case X86::VXORPSZrr:
8378 case X86::KADDBrr:
8379 case X86::KADDWrr:
8380 case X86::KADDDrr:
8381 case X86::KADDQrr:
8382 case X86::KANDBrr:
8383 case X86::KANDWrr:
8384 case X86::KANDDrr:
8385 case X86::KANDQrr:
8386 case X86::KORBrr:
8387 case X86::KORWrr:
8388 case X86::KORDrr:
8389 case X86::KORQrr:
8390 case X86::KXORBrr:
8391 case X86::KXORWrr:
8392 case X86::KXORDrr:
8393 case X86::KXORQrr:
8394 case X86::VPADDBrr:
8395 case X86::VPADDWrr:
8396 case X86::VPADDDrr:
8397 case X86::VPADDQrr:
8398 case X86::VPADDBYrr:
8399 case X86::VPADDWYrr:
8400 case X86::VPADDDYrr:
8401 case X86::VPADDQYrr:
8402 case X86::VPADDBZ128rr:
8403 case X86::VPADDWZ128rr:
8404 case X86::VPADDDZ128rr:
8405 case X86::VPADDQZ128rr:
8406 case X86::VPADDBZ256rr:
8407 case X86::VPADDWZ256rr:
8408 case X86::VPADDDZ256rr:
8409 case X86::VPADDQZ256rr:
8410 case X86::VPADDBZrr:
8411 case X86::VPADDWZrr:
8412 case X86::VPADDDZrr:
8413 case X86::VPADDQZrr:
8414 case X86::VPMULLWrr:
8415 case X86::VPMULLWYrr:
8416 case X86::VPMULLWZ128rr:
8417 case X86::VPMULLWZ256rr:
8418 case X86::VPMULLWZrr:
8419 case X86::VPMULLDrr:
8420 case X86::VPMULLDYrr:
8421 case X86::VPMULLDZ128rr:
8422 case X86::VPMULLDZ256rr:
8423 case X86::VPMULLDZrr:
8424 case X86::VPMULLQZ128rr:
8425 case X86::VPMULLQZ256rr:
8426 case X86::VPMULLQZrr:
8427 case X86::VPMAXSBrr:
8428 case X86::VPMAXSBYrr:
8429 case X86::VPMAXSBZ128rr:
8430 case X86::VPMAXSBZ256rr:
8431 case X86::VPMAXSBZrr:
8432 case X86::VPMAXSDrr:
8433 case X86::VPMAXSDYrr:
8434 case X86::VPMAXSDZ128rr:
8435 case X86::VPMAXSDZ256rr:
8436 case X86::VPMAXSDZrr:
8437 case X86::VPMAXSQZ128rr:
8438 case X86::VPMAXSQZ256rr:
8439 case X86::VPMAXSQZrr:
8440 case X86::VPMAXSWrr:
8441 case X86::VPMAXSWYrr:
8442 case X86::VPMAXSWZ128rr:
8443 case X86::VPMAXSWZ256rr:
8444 case X86::VPMAXSWZrr:
8445 case X86::VPMAXUBrr:
8446 case X86::VPMAXUBYrr:
8447 case X86::VPMAXUBZ128rr:
8448 case X86::VPMAXUBZ256rr:
8449 case X86::VPMAXUBZrr:
8450 case X86::VPMAXUDrr:
8451 case X86::VPMAXUDYrr:
8452 case X86::VPMAXUDZ128rr:
8453 case X86::VPMAXUDZ256rr:
8454 case X86::VPMAXUDZrr:
8455 case X86::VPMAXUQZ128rr:
8456 case X86::VPMAXUQZ256rr:
8457 case X86::VPMAXUQZrr:
8458 case X86::VPMAXUWrr:
8459 case X86::VPMAXUWYrr:
8460 case X86::VPMAXUWZ128rr:
8461 case X86::VPMAXUWZ256rr:
8462 case X86::VPMAXUWZrr:
8463 case X86::VPMINSBrr:
8464 case X86::VPMINSBYrr:
8465 case X86::VPMINSBZ128rr:
8466 case X86::VPMINSBZ256rr:
8467 case X86::VPMINSBZrr:
8468 case X86::VPMINSDrr:
8469 case X86::VPMINSDYrr:
8470 case X86::VPMINSDZ128rr:
8471 case X86::VPMINSDZ256rr:
8472 case X86::VPMINSDZrr:
8473 case X86::VPMINSQZ128rr:
8474 case X86::VPMINSQZ256rr:
8475 case X86::VPMINSQZrr:
8476 case X86::VPMINSWrr:
8477 case X86::VPMINSWYrr:
8478 case X86::VPMINSWZ128rr:
8479 case X86::VPMINSWZ256rr:
8480 case X86::VPMINSWZrr:
8481 case X86::VPMINUBrr:
8482 case X86::VPMINUBYrr:
8483 case X86::VPMINUBZ128rr:
8484 case X86::VPMINUBZ256rr:
8485 case X86::VPMINUBZrr:
8486 case X86::VPMINUDrr:
8487 case X86::VPMINUDYrr:
8488 case X86::VPMINUDZ128rr:
8489 case X86::VPMINUDZ256rr:
8490 case X86::VPMINUDZrr:
8491 case X86::VPMINUQZ128rr:
8492 case X86::VPMINUQZ256rr:
8493 case X86::VPMINUQZrr:
8494 case X86::VPMINUWrr:
8495 case X86::VPMINUWYrr:
8496 case X86::VPMINUWZ128rr:
8497 case X86::VPMINUWZ256rr:
8498 case X86::VPMINUWZrr:
8499 // Normal min/max instructions are not commutative because of NaN and signed
8500 // zero semantics, but these are. Thus, there's no need to check for global
8501 // relaxed math; the instructions themselves have the properties we need.
8502 case X86::MAXCPDrr:
8503 case X86::MAXCPSrr:
8504 case X86::MAXCSDrr:
8505 case X86::MAXCSSrr:
8506 case X86::MINCPDrr:
8507 case X86::MINCPSrr:
8508 case X86::MINCSDrr:
8509 case X86::MINCSSrr:
8510 case X86::VMAXCPDrr:
8511 case X86::VMAXCPSrr:
8512 case X86::VMAXCPDYrr:
8513 case X86::VMAXCPSYrr:
8514 case X86::VMAXCPDZ128rr:
8515 case X86::VMAXCPSZ128rr:
8516 case X86::VMAXCPDZ256rr:
8517 case X86::VMAXCPSZ256rr:
8518 case X86::VMAXCPDZrr:
8519 case X86::VMAXCPSZrr:
8520 case X86::VMAXCSDrr:
8521 case X86::VMAXCSSrr:
8522 case X86::VMAXCSDZrr:
8523 case X86::VMAXCSSZrr:
8524 case X86::VMINCPDrr:
8525 case X86::VMINCPSrr:
8526 case X86::VMINCPDYrr:
8527 case X86::VMINCPSYrr:
8528 case X86::VMINCPDZ128rr:
8529 case X86::VMINCPSZ128rr:
8530 case X86::VMINCPDZ256rr:
8531 case X86::VMINCPSZ256rr:
8532 case X86::VMINCPDZrr:
8533 case X86::VMINCPSZrr:
8534 case X86::VMINCSDrr:
8535 case X86::VMINCSSrr:
8536 case X86::VMINCSDZrr:
8537 case X86::VMINCSSZrr:
8538 case X86::VMAXCPHZ128rr:
8539 case X86::VMAXCPHZ256rr:
8540 case X86::VMAXCPHZrr:
8541 case X86::VMAXCSHZrr:
8542 case X86::VMINCPHZ128rr:
8543 case X86::VMINCPHZ256rr:
8544 case X86::VMINCPHZrr:
8545 case X86::VMINCSHZrr:
8546 return true;
8547 case X86::ADDPDrr:
8548 case X86::ADDPSrr:
8549 case X86::ADDSDrr:
8550 case X86::ADDSSrr:
8551 case X86::MULPDrr:
8552 case X86::MULPSrr:
8553 case X86::MULSDrr:
8554 case X86::MULSSrr:
8555 case X86::VADDPDrr:
8556 case X86::VADDPSrr:
8557 case X86::VADDPDYrr:
8558 case X86::VADDPSYrr:
8559 case X86::VADDPDZ128rr:
8560 case X86::VADDPSZ128rr:
8561 case X86::VADDPDZ256rr:
8562 case X86::VADDPSZ256rr:
8563 case X86::VADDPDZrr:
8564 case X86::VADDPSZrr:
8565 case X86::VADDSDrr:
8566 case X86::VADDSSrr:
8567 case X86::VADDSDZrr:
8568 case X86::VADDSSZrr:
8569 case X86::VMULPDrr:
8570 case X86::VMULPSrr:
8571 case X86::VMULPDYrr:
8572 case X86::VMULPSYrr:
8573 case X86::VMULPDZ128rr:
8574 case X86::VMULPSZ128rr:
8575 case X86::VMULPDZ256rr:
8576 case X86::VMULPSZ256rr:
8577 case X86::VMULPDZrr:
8578 case X86::VMULPSZrr:
8579 case X86::VMULSDrr:
8580 case X86::VMULSSrr:
8581 case X86::VMULSDZrr:
8582 case X86::VMULSSZrr:
8583 case X86::VADDPHZ128rr:
8584 case X86::VADDPHZ256rr:
8585 case X86::VADDPHZrr:
8586 case X86::VADDSHZrr:
8587 case X86::VMULPHZ128rr:
8588 case X86::VMULPHZ256rr:
8589 case X86::VMULPHZrr:
8590 case X86::VMULSHZrr:
8591 return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
8592 Inst.getFlag(MachineInstr::MIFlag::FmNsz);
8593 default:
8594 return false;
8595 }
8596 }
8597
8598 /// If \p DescribedReg overlaps with the MOVrr instruction's destination
8599 /// register then, if possible, describe the value in terms of the source
8600 /// register.
8601 static Optional<ParamLoadedValue>
describeMOVrrLoadedValue(const MachineInstr & MI,Register DescribedReg,const TargetRegisterInfo * TRI)8602 describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
8603 const TargetRegisterInfo *TRI) {
8604 Register DestReg = MI.getOperand(0).getReg();
8605 Register SrcReg = MI.getOperand(1).getReg();
8606
8607 auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
8608
8609 // If the described register is the destination, just return the source.
8610 if (DestReg == DescribedReg)
8611 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
8612
8613 // If the described register is a sub-register of the destination register,
8614 // then pick out the source register's corresponding sub-register.
8615 if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) {
8616 Register SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx);
8617 return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr);
8618 }
8619
8620 // The remaining case to consider is when the described register is a
8621 // super-register of the destination register. MOV8rr and MOV16rr does not
8622 // write to any of the other bytes in the register, meaning that we'd have to
8623 // describe the value using a combination of the source register and the
8624 // non-overlapping bits in the described register, which is not currently
8625 // possible.
8626 if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr ||
8627 !TRI->isSuperRegister(DestReg, DescribedReg))
8628 return None;
8629
8630 assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case");
8631 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
8632 }
8633
8634 Optional<ParamLoadedValue>
describeLoadedValue(const MachineInstr & MI,Register Reg) const8635 X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
8636 const MachineOperand *Op = nullptr;
8637 DIExpression *Expr = nullptr;
8638
8639 const TargetRegisterInfo *TRI = &getRegisterInfo();
8640
8641 switch (MI.getOpcode()) {
8642 case X86::LEA32r:
8643 case X86::LEA64r:
8644 case X86::LEA64_32r: {
8645 // We may need to describe a 64-bit parameter with a 32-bit LEA.
8646 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
8647 return None;
8648
8649 // Operand 4 could be global address. For now we do not support
8650 // such situation.
8651 if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm())
8652 return None;
8653
8654 const MachineOperand &Op1 = MI.getOperand(1);
8655 const MachineOperand &Op2 = MI.getOperand(3);
8656 assert(Op2.isReg() && (Op2.getReg() == X86::NoRegister ||
8657 Register::isPhysicalRegister(Op2.getReg())));
8658
8659 // Omit situations like:
8660 // %rsi = lea %rsi, 4, ...
8661 if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) ||
8662 Op2.getReg() == MI.getOperand(0).getReg())
8663 return None;
8664 else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister &&
8665 TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) ||
8666 (Op2.getReg() != X86::NoRegister &&
8667 TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg())))
8668 return None;
8669
8670 int64_t Coef = MI.getOperand(2).getImm();
8671 int64_t Offset = MI.getOperand(4).getImm();
8672 SmallVector<uint64_t, 8> Ops;
8673
8674 if ((Op1.isReg() && Op1.getReg() != X86::NoRegister)) {
8675 Op = &Op1;
8676 } else if (Op1.isFI())
8677 Op = &Op1;
8678
8679 if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) {
8680 Ops.push_back(dwarf::DW_OP_constu);
8681 Ops.push_back(Coef + 1);
8682 Ops.push_back(dwarf::DW_OP_mul);
8683 } else {
8684 if (Op && Op2.getReg() != X86::NoRegister) {
8685 int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false);
8686 if (dwarfReg < 0)
8687 return None;
8688 else if (dwarfReg < 32) {
8689 Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg);
8690 Ops.push_back(0);
8691 } else {
8692 Ops.push_back(dwarf::DW_OP_bregx);
8693 Ops.push_back(dwarfReg);
8694 Ops.push_back(0);
8695 }
8696 } else if (!Op) {
8697 assert(Op2.getReg() != X86::NoRegister);
8698 Op = &Op2;
8699 }
8700
8701 if (Coef > 1) {
8702 assert(Op2.getReg() != X86::NoRegister);
8703 Ops.push_back(dwarf::DW_OP_constu);
8704 Ops.push_back(Coef);
8705 Ops.push_back(dwarf::DW_OP_mul);
8706 }
8707
8708 if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) &&
8709 Op2.getReg() != X86::NoRegister) {
8710 Ops.push_back(dwarf::DW_OP_plus);
8711 }
8712 }
8713
8714 DIExpression::appendOffset(Ops, Offset);
8715 Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), Ops);
8716
8717 return ParamLoadedValue(*Op, Expr);;
8718 }
8719 case X86::MOV8ri:
8720 case X86::MOV16ri:
8721 // TODO: Handle MOV8ri and MOV16ri.
8722 return None;
8723 case X86::MOV32ri:
8724 case X86::MOV64ri:
8725 case X86::MOV64ri32:
8726 // MOV32ri may be used for producing zero-extended 32-bit immediates in
8727 // 64-bit parameters, so we need to consider super-registers.
8728 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
8729 return None;
8730 return ParamLoadedValue(MI.getOperand(1), Expr);
8731 case X86::MOV8rr:
8732 case X86::MOV16rr:
8733 case X86::MOV32rr:
8734 case X86::MOV64rr:
8735 return describeMOVrrLoadedValue(MI, Reg, TRI);
8736 case X86::XOR32rr: {
8737 // 64-bit parameters are zero-materialized using XOR32rr, so also consider
8738 // super-registers.
8739 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
8740 return None;
8741 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
8742 return ParamLoadedValue(MachineOperand::CreateImm(0), Expr);
8743 return None;
8744 }
8745 case X86::MOVSX64rr32: {
8746 // We may need to describe the lower 32 bits of the MOVSX; for example, in
8747 // cases like this:
8748 //
8749 // $ebx = [...]
8750 // $rdi = MOVSX64rr32 $ebx
8751 // $esi = MOV32rr $edi
8752 if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg))
8753 return None;
8754
8755 Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
8756
8757 // If the described register is the destination register we need to
8758 // sign-extend the source register from 32 bits. The other case we handle
8759 // is when the described register is the 32-bit sub-register of the
8760 // destination register, in case we just need to return the source
8761 // register.
8762 if (Reg == MI.getOperand(0).getReg())
8763 Expr = DIExpression::appendExt(Expr, 32, 64, true);
8764 else
8765 assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) &&
8766 "Unhandled sub-register case for MOVSX64rr32");
8767
8768 return ParamLoadedValue(MI.getOperand(1), Expr);
8769 }
8770 default:
8771 assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction");
8772 return TargetInstrInfo::describeLoadedValue(MI, Reg);
8773 }
8774 }
8775
8776 /// This is an architecture-specific helper function of reassociateOps.
8777 /// Set special operand attributes for new instructions after reassociation.
setSpecialOperandAttr(MachineInstr & OldMI1,MachineInstr & OldMI2,MachineInstr & NewMI1,MachineInstr & NewMI2) const8778 void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
8779 MachineInstr &OldMI2,
8780 MachineInstr &NewMI1,
8781 MachineInstr &NewMI2) const {
8782 // Propagate FP flags from the original instructions.
8783 // But clear poison-generating flags because those may not be valid now.
8784 // TODO: There should be a helper function for copying only fast-math-flags.
8785 uint16_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags();
8786 NewMI1.setFlags(IntersectedFlags);
8787 NewMI1.clearFlag(MachineInstr::MIFlag::NoSWrap);
8788 NewMI1.clearFlag(MachineInstr::MIFlag::NoUWrap);
8789 NewMI1.clearFlag(MachineInstr::MIFlag::IsExact);
8790
8791 NewMI2.setFlags(IntersectedFlags);
8792 NewMI2.clearFlag(MachineInstr::MIFlag::NoSWrap);
8793 NewMI2.clearFlag(MachineInstr::MIFlag::NoUWrap);
8794 NewMI2.clearFlag(MachineInstr::MIFlag::IsExact);
8795
8796 // Integer instructions may define an implicit EFLAGS dest register operand.
8797 MachineOperand *OldFlagDef1 = OldMI1.findRegisterDefOperand(X86::EFLAGS);
8798 MachineOperand *OldFlagDef2 = OldMI2.findRegisterDefOperand(X86::EFLAGS);
8799
8800 assert(!OldFlagDef1 == !OldFlagDef2 &&
8801 "Unexpected instruction type for reassociation");
8802
8803 if (!OldFlagDef1 || !OldFlagDef2)
8804 return;
8805
8806 assert(OldFlagDef1->isDead() && OldFlagDef2->isDead() &&
8807 "Must have dead EFLAGS operand in reassociable instruction");
8808
8809 MachineOperand *NewFlagDef1 = NewMI1.findRegisterDefOperand(X86::EFLAGS);
8810 MachineOperand *NewFlagDef2 = NewMI2.findRegisterDefOperand(X86::EFLAGS);
8811
8812 assert(NewFlagDef1 && NewFlagDef2 &&
8813 "Unexpected operand in reassociable instruction");
8814
8815 // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations
8816 // of this pass or other passes. The EFLAGS operands must be dead in these new
8817 // instructions because the EFLAGS operands in the original instructions must
8818 // be dead in order for reassociation to occur.
8819 NewFlagDef1->setIsDead();
8820 NewFlagDef2->setIsDead();
8821 }
8822
8823 std::pair<unsigned, unsigned>
decomposeMachineOperandsTargetFlags(unsigned TF) const8824 X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
8825 return std::make_pair(TF, 0u);
8826 }
8827
8828 ArrayRef<std::pair<unsigned, const char *>>
getSerializableDirectMachineOperandTargetFlags() const8829 X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
8830 using namespace X86II;
8831 static const std::pair<unsigned, const char *> TargetFlags[] = {
8832 {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
8833 {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
8834 {MO_GOT, "x86-got"},
8835 {MO_GOTOFF, "x86-gotoff"},
8836 {MO_GOTPCREL, "x86-gotpcrel"},
8837 {MO_PLT, "x86-plt"},
8838 {MO_TLSGD, "x86-tlsgd"},
8839 {MO_TLSLD, "x86-tlsld"},
8840 {MO_TLSLDM, "x86-tlsldm"},
8841 {MO_GOTTPOFF, "x86-gottpoff"},
8842 {MO_INDNTPOFF, "x86-indntpoff"},
8843 {MO_TPOFF, "x86-tpoff"},
8844 {MO_DTPOFF, "x86-dtpoff"},
8845 {MO_NTPOFF, "x86-ntpoff"},
8846 {MO_GOTNTPOFF, "x86-gotntpoff"},
8847 {MO_DLLIMPORT, "x86-dllimport"},
8848 {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
8849 {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
8850 {MO_TLVP, "x86-tlvp"},
8851 {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
8852 {MO_SECREL, "x86-secrel"},
8853 {MO_COFFSTUB, "x86-coffstub"}};
8854 return makeArrayRef(TargetFlags);
8855 }
8856
8857 namespace {
8858 /// Create Global Base Reg pass. This initializes the PIC
8859 /// global base register for x86-32.
8860 struct CGBR : public MachineFunctionPass {
8861 static char ID;
CGBR__anon0e6519760611::CGBR8862 CGBR() : MachineFunctionPass(ID) {}
8863
runOnMachineFunction__anon0e6519760611::CGBR8864 bool runOnMachineFunction(MachineFunction &MF) override {
8865 const X86TargetMachine *TM =
8866 static_cast<const X86TargetMachine *>(&MF.getTarget());
8867 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
8868
8869 // Don't do anything in the 64-bit small and kernel code models. They use
8870 // RIP-relative addressing for everything.
8871 if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small ||
8872 TM->getCodeModel() == CodeModel::Kernel))
8873 return false;
8874
8875 // Only emit a global base reg in PIC mode.
8876 if (!TM->isPositionIndependent())
8877 return false;
8878
8879 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
8880 Register GlobalBaseReg = X86FI->getGlobalBaseReg();
8881
8882 // If we didn't need a GlobalBaseReg, don't insert code.
8883 if (GlobalBaseReg == 0)
8884 return false;
8885
8886 // Insert the set of GlobalBaseReg into the first MBB of the function
8887 MachineBasicBlock &FirstMBB = MF.front();
8888 MachineBasicBlock::iterator MBBI = FirstMBB.begin();
8889 DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
8890 MachineRegisterInfo &RegInfo = MF.getRegInfo();
8891 const X86InstrInfo *TII = STI.getInstrInfo();
8892
8893 Register PC;
8894 if (STI.isPICStyleGOT())
8895 PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
8896 else
8897 PC = GlobalBaseReg;
8898
8899 if (STI.is64Bit()) {
8900 if (TM->getCodeModel() == CodeModel::Medium) {
8901 // In the medium code model, use a RIP-relative LEA to materialize the
8902 // GOT.
8903 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
8904 .addReg(X86::RIP)
8905 .addImm(0)
8906 .addReg(0)
8907 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
8908 .addReg(0);
8909 } else if (TM->getCodeModel() == CodeModel::Large) {
8910 // In the large code model, we are aiming for this code, though the
8911 // register allocation may vary:
8912 // leaq .LN$pb(%rip), %rax
8913 // movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx
8914 // addq %rcx, %rax
8915 // RAX now holds address of _GLOBAL_OFFSET_TABLE_.
8916 Register PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
8917 Register GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
8918 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg)
8919 .addReg(X86::RIP)
8920 .addImm(0)
8921 .addReg(0)
8922 .addSym(MF.getPICBaseSymbol())
8923 .addReg(0);
8924 std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol());
8925 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg)
8926 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
8927 X86II::MO_PIC_BASE_OFFSET);
8928 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC)
8929 .addReg(PBReg, RegState::Kill)
8930 .addReg(GOTReg, RegState::Kill);
8931 } else {
8932 llvm_unreachable("unexpected code model");
8933 }
8934 } else {
8935 // Operand of MovePCtoStack is completely ignored by asm printer. It's
8936 // only used in JIT code emission as displacement to pc.
8937 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
8938
8939 // If we're using vanilla 'GOT' PIC style, we should use relative
8940 // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
8941 if (STI.isPICStyleGOT()) {
8942 // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
8943 // %some_register
8944 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
8945 .addReg(PC)
8946 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
8947 X86II::MO_GOT_ABSOLUTE_ADDRESS);
8948 }
8949 }
8950
8951 return true;
8952 }
8953
getPassName__anon0e6519760611::CGBR8954 StringRef getPassName() const override {
8955 return "X86 PIC Global Base Reg Initialization";
8956 }
8957
getAnalysisUsage__anon0e6519760611::CGBR8958 void getAnalysisUsage(AnalysisUsage &AU) const override {
8959 AU.setPreservesCFG();
8960 MachineFunctionPass::getAnalysisUsage(AU);
8961 }
8962 };
8963 } // namespace
8964
8965 char CGBR::ID = 0;
8966 FunctionPass*
createX86GlobalBaseRegPass()8967 llvm::createX86GlobalBaseRegPass() { return new CGBR(); }
8968
8969 namespace {
8970 struct LDTLSCleanup : public MachineFunctionPass {
8971 static char ID;
LDTLSCleanup__anon0e6519760711::LDTLSCleanup8972 LDTLSCleanup() : MachineFunctionPass(ID) {}
8973
runOnMachineFunction__anon0e6519760711::LDTLSCleanup8974 bool runOnMachineFunction(MachineFunction &MF) override {
8975 if (skipFunction(MF.getFunction()))
8976 return false;
8977
8978 X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
8979 if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
8980 // No point folding accesses if there isn't at least two.
8981 return false;
8982 }
8983
8984 MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
8985 return VisitNode(DT->getRootNode(), 0);
8986 }
8987
8988 // Visit the dominator subtree rooted at Node in pre-order.
8989 // If TLSBaseAddrReg is non-null, then use that to replace any
8990 // TLS_base_addr instructions. Otherwise, create the register
8991 // when the first such instruction is seen, and then use it
8992 // as we encounter more instructions.
VisitNode__anon0e6519760711::LDTLSCleanup8993 bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
8994 MachineBasicBlock *BB = Node->getBlock();
8995 bool Changed = false;
8996
8997 // Traverse the current block.
8998 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
8999 ++I) {
9000 switch (I->getOpcode()) {
9001 case X86::TLS_base_addr32:
9002 case X86::TLS_base_addr64:
9003 if (TLSBaseAddrReg)
9004 I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
9005 else
9006 I = SetRegister(*I, &TLSBaseAddrReg);
9007 Changed = true;
9008 break;
9009 default:
9010 break;
9011 }
9012 }
9013
9014 // Visit the children of this block in the dominator tree.
9015 for (auto I = Node->begin(), E = Node->end(); I != E; ++I) {
9016 Changed |= VisitNode(*I, TLSBaseAddrReg);
9017 }
9018
9019 return Changed;
9020 }
9021
9022 // Replace the TLS_base_addr instruction I with a copy from
9023 // TLSBaseAddrReg, returning the new instruction.
ReplaceTLSBaseAddrCall__anon0e6519760711::LDTLSCleanup9024 MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
9025 unsigned TLSBaseAddrReg) {
9026 MachineFunction *MF = I.getParent()->getParent();
9027 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
9028 const bool is64Bit = STI.is64Bit();
9029 const X86InstrInfo *TII = STI.getInstrInfo();
9030
9031 // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
9032 MachineInstr *Copy =
9033 BuildMI(*I.getParent(), I, I.getDebugLoc(),
9034 TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
9035 .addReg(TLSBaseAddrReg);
9036
9037 // Erase the TLS_base_addr instruction.
9038 I.eraseFromParent();
9039
9040 return Copy;
9041 }
9042
9043 // Create a virtual register in *TLSBaseAddrReg, and populate it by
9044 // inserting a copy instruction after I. Returns the new instruction.
SetRegister__anon0e6519760711::LDTLSCleanup9045 MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
9046 MachineFunction *MF = I.getParent()->getParent();
9047 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
9048 const bool is64Bit = STI.is64Bit();
9049 const X86InstrInfo *TII = STI.getInstrInfo();
9050
9051 // Create a virtual register for the TLS base address.
9052 MachineRegisterInfo &RegInfo = MF->getRegInfo();
9053 *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
9054 ? &X86::GR64RegClass
9055 : &X86::GR32RegClass);
9056
9057 // Insert a copy from RAX/EAX to TLSBaseAddrReg.
9058 MachineInstr *Next = I.getNextNode();
9059 MachineInstr *Copy =
9060 BuildMI(*I.getParent(), Next, I.getDebugLoc(),
9061 TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
9062 .addReg(is64Bit ? X86::RAX : X86::EAX);
9063
9064 return Copy;
9065 }
9066
getPassName__anon0e6519760711::LDTLSCleanup9067 StringRef getPassName() const override {
9068 return "Local Dynamic TLS Access Clean-up";
9069 }
9070
getAnalysisUsage__anon0e6519760711::LDTLSCleanup9071 void getAnalysisUsage(AnalysisUsage &AU) const override {
9072 AU.setPreservesCFG();
9073 AU.addRequired<MachineDominatorTree>();
9074 MachineFunctionPass::getAnalysisUsage(AU);
9075 }
9076 };
9077 }
9078
9079 char LDTLSCleanup::ID = 0;
9080 FunctionPass*
createCleanupLocalDynamicTLSPass()9081 llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
9082
9083 /// Constants defining how certain sequences should be outlined.
9084 ///
9085 /// \p MachineOutlinerDefault implies that the function is called with a call
9086 /// instruction, and a return must be emitted for the outlined function frame.
9087 ///
9088 /// That is,
9089 ///
9090 /// I1 OUTLINED_FUNCTION:
9091 /// I2 --> call OUTLINED_FUNCTION I1
9092 /// I3 I2
9093 /// I3
9094 /// ret
9095 ///
9096 /// * Call construction overhead: 1 (call instruction)
9097 /// * Frame construction overhead: 1 (return instruction)
9098 ///
9099 /// \p MachineOutlinerTailCall implies that the function is being tail called.
9100 /// A jump is emitted instead of a call, and the return is already present in
9101 /// the outlined sequence. That is,
9102 ///
9103 /// I1 OUTLINED_FUNCTION:
9104 /// I2 --> jmp OUTLINED_FUNCTION I1
9105 /// ret I2
9106 /// ret
9107 ///
9108 /// * Call construction overhead: 1 (jump instruction)
9109 /// * Frame construction overhead: 0 (don't need to return)
9110 ///
9111 enum MachineOutlinerClass {
9112 MachineOutlinerDefault,
9113 MachineOutlinerTailCall
9114 };
9115
getOutliningCandidateInfo(std::vector<outliner::Candidate> & RepeatedSequenceLocs) const9116 outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo(
9117 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
9118 unsigned SequenceSize =
9119 std::accumulate(RepeatedSequenceLocs[0].front(),
9120 std::next(RepeatedSequenceLocs[0].back()), 0,
9121 [](unsigned Sum, const MachineInstr &MI) {
9122 // FIXME: x86 doesn't implement getInstSizeInBytes, so
9123 // we can't tell the cost. Just assume each instruction
9124 // is one byte.
9125 if (MI.isDebugInstr() || MI.isKill())
9126 return Sum;
9127 return Sum + 1;
9128 });
9129
9130 // We check to see if CFI Instructions are present, and if they are
9131 // we find the number of CFI Instructions in the candidates.
9132 unsigned CFICount = 0;
9133 MachineBasicBlock::iterator MBBI = RepeatedSequenceLocs[0].front();
9134 for (unsigned Loc = RepeatedSequenceLocs[0].getStartIdx();
9135 Loc < RepeatedSequenceLocs[0].getEndIdx() + 1; Loc++) {
9136 const std::vector<MCCFIInstruction> &CFIInstructions =
9137 RepeatedSequenceLocs[0].getMF()->getFrameInstructions();
9138 if (MBBI->isCFIInstruction()) {
9139 unsigned CFIIndex = MBBI->getOperand(0).getCFIIndex();
9140 MCCFIInstruction CFI = CFIInstructions[CFIIndex];
9141 CFICount++;
9142 }
9143 MBBI++;
9144 }
9145
9146 // We compare the number of found CFI Instructions to the number of CFI
9147 // instructions in the parent function for each candidate. We must check this
9148 // since if we outline one of the CFI instructions in a function, we have to
9149 // outline them all for correctness. If we do not, the address offsets will be
9150 // incorrect between the two sections of the program.
9151 for (outliner::Candidate &C : RepeatedSequenceLocs) {
9152 std::vector<MCCFIInstruction> CFIInstructions =
9153 C.getMF()->getFrameInstructions();
9154
9155 if (CFICount > 0 && CFICount != CFIInstructions.size())
9156 return outliner::OutlinedFunction();
9157 }
9158
9159 // FIXME: Use real size in bytes for call and ret instructions.
9160 if (RepeatedSequenceLocs[0].back()->isTerminator()) {
9161 for (outliner::Candidate &C : RepeatedSequenceLocs)
9162 C.setCallInfo(MachineOutlinerTailCall, 1);
9163
9164 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
9165 0, // Number of bytes to emit frame.
9166 MachineOutlinerTailCall // Type of frame.
9167 );
9168 }
9169
9170 if (CFICount > 0)
9171 return outliner::OutlinedFunction();
9172
9173 for (outliner::Candidate &C : RepeatedSequenceLocs)
9174 C.setCallInfo(MachineOutlinerDefault, 1);
9175
9176 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1,
9177 MachineOutlinerDefault);
9178 }
9179
isFunctionSafeToOutlineFrom(MachineFunction & MF,bool OutlineFromLinkOnceODRs) const9180 bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
9181 bool OutlineFromLinkOnceODRs) const {
9182 const Function &F = MF.getFunction();
9183
9184 // Does the function use a red zone? If it does, then we can't risk messing
9185 // with the stack.
9186 if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
9187 // It could have a red zone. If it does, then we don't want to touch it.
9188 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
9189 if (!X86FI || X86FI->getUsesRedZone())
9190 return false;
9191 }
9192
9193 // If we *don't* want to outline from things that could potentially be deduped
9194 // then return false.
9195 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
9196 return false;
9197
9198 // This function is viable for outlining, so return true.
9199 return true;
9200 }
9201
9202 outliner::InstrType
getOutliningType(MachineBasicBlock::iterator & MIT,unsigned Flags) const9203 X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
9204 MachineInstr &MI = *MIT;
9205 // Don't allow debug values to impact outlining type.
9206 if (MI.isDebugInstr() || MI.isIndirectDebugValue())
9207 return outliner::InstrType::Invisible;
9208
9209 // At this point, KILL instructions don't really tell us much so we can go
9210 // ahead and skip over them.
9211 if (MI.isKill())
9212 return outliner::InstrType::Invisible;
9213
9214 // Is this a tail call? If yes, we can outline as a tail call.
9215 if (isTailCall(MI))
9216 return outliner::InstrType::Legal;
9217
9218 // Is this the terminator of a basic block?
9219 if (MI.isTerminator() || MI.isReturn()) {
9220
9221 // Does its parent have any successors in its MachineFunction?
9222 if (MI.getParent()->succ_empty())
9223 return outliner::InstrType::Legal;
9224
9225 // It does, so we can't tail call it.
9226 return outliner::InstrType::Illegal;
9227 }
9228
9229 // Don't outline anything that modifies or reads from the stack pointer.
9230 //
9231 // FIXME: There are instructions which are being manually built without
9232 // explicit uses/defs so we also have to check the MCInstrDesc. We should be
9233 // able to remove the extra checks once those are fixed up. For example,
9234 // sometimes we might get something like %rax = POP64r 1. This won't be
9235 // caught by modifiesRegister or readsRegister even though the instruction
9236 // really ought to be formed so that modifiesRegister/readsRegister would
9237 // catch it.
9238 if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
9239 MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
9240 MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
9241 return outliner::InstrType::Illegal;
9242
9243 // Outlined calls change the instruction pointer, so don't read from it.
9244 if (MI.readsRegister(X86::RIP, &RI) ||
9245 MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
9246 MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
9247 return outliner::InstrType::Illegal;
9248
9249 // Positions can't safely be outlined.
9250 if (MI.isPosition())
9251 return outliner::InstrType::Illegal;
9252
9253 // Make sure none of the operands of this instruction do anything tricky.
9254 for (const MachineOperand &MOP : MI.operands())
9255 if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
9256 MOP.isTargetIndex())
9257 return outliner::InstrType::Illegal;
9258
9259 return outliner::InstrType::Legal;
9260 }
9261
buildOutlinedFrame(MachineBasicBlock & MBB,MachineFunction & MF,const outliner::OutlinedFunction & OF) const9262 void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB,
9263 MachineFunction &MF,
9264 const outliner::OutlinedFunction &OF)
9265 const {
9266 // If we're a tail call, we already have a return, so don't do anything.
9267 if (OF.FrameConstructionID == MachineOutlinerTailCall)
9268 return;
9269
9270 // We're a normal call, so our sequence doesn't have a return instruction.
9271 // Add it in.
9272 MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ));
9273 MBB.insert(MBB.end(), retq);
9274 }
9275
9276 MachineBasicBlock::iterator
insertOutlinedCall(Module & M,MachineBasicBlock & MBB,MachineBasicBlock::iterator & It,MachineFunction & MF,const outliner::Candidate & C) const9277 X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
9278 MachineBasicBlock::iterator &It,
9279 MachineFunction &MF,
9280 const outliner::Candidate &C) const {
9281 // Is it a tail call?
9282 if (C.CallConstructionID == MachineOutlinerTailCall) {
9283 // Yes, just insert a JMP.
9284 It = MBB.insert(It,
9285 BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
9286 .addGlobalAddress(M.getNamedValue(MF.getName())));
9287 } else {
9288 // No, insert a call.
9289 It = MBB.insert(It,
9290 BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
9291 .addGlobalAddress(M.getNamedValue(MF.getName())));
9292 }
9293
9294 return It;
9295 }
9296
9297 #define GET_INSTRINFO_HELPERS
9298 #include "X86GenInstrInfo.inc"
9299