1//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// This file describes the various pseudo instructions used by the compiler, 10// as well as Pat patterns used during instruction selection. 11// 12//===----------------------------------------------------------------------===// 13 14//===----------------------------------------------------------------------===// 15// Pattern Matching Support 16 17def GetLo32XForm : SDNodeXForm<imm, [{ 18 // Transformation function: get the low 32 bits. 19 return getI32Imm((uint32_t)N->getZExtValue(), SDLoc(N)); 20}]>; 21 22 23//===----------------------------------------------------------------------===// 24// Random Pseudo Instructions. 25 26// PIC base construction. This expands to code that looks like this: 27// call $next_inst 28// popl %destreg" 29let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP], 30 SchedRW = [WriteJump] in 31 def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label), 32 "", []>; 33 34// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into 35// a stack adjustment and the codegen must know that they may modify the stack 36// pointer before prolog-epilog rewriting occurs. 37// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become 38// sub / add which can clobber EFLAGS. 39let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in { 40def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), 41 (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3), 42 "#ADJCALLSTACKDOWN", []>, Requires<[NotLP64]>; 43def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2), 44 "#ADJCALLSTACKUP", 45 [(X86callseq_end timm:$amt1, timm:$amt2)]>, 46 Requires<[NotLP64]>; 47} 48def : Pat<(X86callseq_start timm:$amt1, timm:$amt2), 49 (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>; 50 51 52// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into 53// a stack adjustment and the codegen must know that they may modify the stack 54// pointer before prolog-epilog rewriting occurs. 55// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become 56// sub / add which can clobber EFLAGS. 57let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in { 58def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), 59 (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3), 60 "#ADJCALLSTACKDOWN", []>, Requires<[IsLP64]>; 61def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2), 62 "#ADJCALLSTACKUP", 63 [(X86callseq_end timm:$amt1, timm:$amt2)]>, 64 Requires<[IsLP64]>; 65} 66def : Pat<(X86callseq_start timm:$amt1, timm:$amt2), 67 (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>; 68 69let SchedRW = [WriteSystem] in { 70 71// x86-64 va_start lowering magic. 
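//
// As a hedged sketch of what the custom inserter for VASTART_SAVE_XMM_REGS
// below produces (the authoritative code is the C++ custom inserter): in the
// SysV x86-64 ABI, AL carries an upper bound on the number of vector register
// arguments, so the XMM spills into the register save area are guarded by it:
//   testb %al, %al
//   je    .Lno_xmm_args            # hypothetical label
//   movaps %xmm0, <regsavefi + offset + 0>
//   ...
//   movaps %xmm7, <regsavefi + offset + 112>
// .Lno_xmm_args: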
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                          imm:$regsavefi,
                                                          imm:$offset),
                               (implicit EFLAGS)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;


// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;
}

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets. These calls are needed to probe the stack when allocating more than
// 4k bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects it has compared to an ordinary call, such as changing the stack
// pointer.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR32:$size)]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR64:$size)]>,
                    Requires<[In64BitMode]>;
} // SchedRW

// These instructions XOR the frame pointer into a GPR. They are used in some
// stack protection schemes. These are post-RA pseudos because we only know the
// frame register after register allocation.
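//
// As an illustrative example (assuming %ebp ends up as the frame register),
// XOR32_FP applied to some register $reg is emitted after register allocation
// as roughly:
//   xorl %ebp, %reg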
140let Constraints = "$src = $dst", isMoveImm = 1, isPseudo = 1, Defs = [EFLAGS] in { 141 def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src), 142 "xorl\t$$FP, $src", []>, 143 Requires<[NotLP64]>, Sched<[WriteALU]>; 144 def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src), 145 "xorq\t$$FP $src", []>, 146 Requires<[In64BitMode]>, Sched<[WriteALU]>; 147} 148 149//===----------------------------------------------------------------------===// 150// EH Pseudo Instructions 151// 152let SchedRW = [WriteSystem] in { 153let isTerminator = 1, isReturn = 1, isBarrier = 1, 154 hasCtrlDep = 1, isCodeGenOnly = 1 in { 155def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr), 156 "ret\t#eh_return, addr: $addr", 157 [(X86ehret GR32:$addr)]>, Sched<[WriteJumpLd]>; 158 159} 160 161let isTerminator = 1, isReturn = 1, isBarrier = 1, 162 hasCtrlDep = 1, isCodeGenOnly = 1 in { 163def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr), 164 "ret\t#eh_return, addr: $addr", 165 [(X86ehret GR64:$addr)]>, Sched<[WriteJumpLd]>; 166 167} 168 169let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1, 170 isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1 in { 171 def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>; 172 173 // CATCHRET needs a custom inserter for SEH. 174 let usesCustomInserter = 1 in 175 def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from), 176 "# CATCHRET", 177 [(catchret bb:$dst, bb:$from)]>; 178} 179 180let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1, 181 usesCustomInserter = 1 in 182def CATCHPAD : I<0, Pseudo, (outs), (ins), "# CATCHPAD", [(catchpad)]>; 183 184// This instruction is responsible for re-establishing stack pointers after an 185// exception has been caught and we are rejoining normal control flow in the 186// parent function or funclet. It generally sets ESP and EBP, and optionally 187// ESI. It is only needed for 32-bit WinEH, as the runtime restores CSRs for us 188// elsewhere. 189let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1 in 190def EH_RESTORE : I<0, Pseudo, (outs), (ins), "# EH_RESTORE", []>; 191 192let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1, 193 usesCustomInserter = 1 in { 194 def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf), 195 "#EH_SJLJ_SETJMP32", 196 [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>, 197 Requires<[Not64BitMode]>; 198 def EH_SjLj_SetJmp64 : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf), 199 "#EH_SJLJ_SETJMP64", 200 [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>, 201 Requires<[In64BitMode]>; 202 let isTerminator = 1 in { 203 def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf), 204 "#EH_SJLJ_LONGJMP32", 205 [(X86eh_sjlj_longjmp addr:$buf)]>, 206 Requires<[Not64BitMode]>; 207 def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf), 208 "#EH_SJLJ_LONGJMP64", 209 [(X86eh_sjlj_longjmp addr:$buf)]>, 210 Requires<[In64BitMode]>; 211 } 212} 213 214let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in { 215 def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst), 216 "#EH_SjLj_Setup\t$dst", []>; 217} 218} // SchedRW 219 220//===----------------------------------------------------------------------===// 221// Pseudo instructions used by unwind info. 
222// 223let isPseudo = 1, SchedRW = [WriteSystem] in { 224 def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg), 225 "#SEH_PushReg $reg", []>; 226 def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst), 227 "#SEH_SaveReg $reg, $dst", []>; 228 def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst), 229 "#SEH_SaveXMM $reg, $dst", []>; 230 def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size), 231 "#SEH_StackAlloc $size", []>; 232 def SEH_StackAlign : I<0, Pseudo, (outs), (ins i32imm:$align), 233 "#SEH_StackAlign $align", []>; 234 def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset), 235 "#SEH_SetFrame $reg, $offset", []>; 236 def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode), 237 "#SEH_PushFrame $mode", []>; 238 def SEH_EndPrologue : I<0, Pseudo, (outs), (ins), 239 "#SEH_EndPrologue", []>; 240 def SEH_Epilogue : I<0, Pseudo, (outs), (ins), 241 "#SEH_Epilogue", []>; 242} 243 244//===----------------------------------------------------------------------===// 245// Pseudo instructions used by segmented stacks. 246// 247 248// This is lowered into a RET instruction by MCInstLower. We need 249// this so that we don't have to have a MachineBasicBlock which ends 250// with a RET and also has successors. 251let isPseudo = 1, SchedRW = [WriteJumpLd] in { 252def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>; 253 254// This instruction is lowered to a RET followed by a MOV. The two 255// instructions are not generated on a higher level since then the 256// verifier sees a MachineBasicBlock ending with a non-terminator. 257def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>; 258} 259 260//===----------------------------------------------------------------------===// 261// Pseudo instruction used by retguard 262 263// This is lowered to a JE 2; INT3; INT3. Prior to this pseudo should be a 264// compare instruction to ensure the retguard cookie is correct. 265// We use a pseudo here in order to avoid splitting the BB just before the return. 266// Splitting the BB and inserting a JE_1 over a new INT3 BB occasionally 267// resulted in incorrect code when a value from a byte register (CL) was 268// used as a return value. When emitted as a split BB, the single byte 269// register would sometimes be widened to 4 bytes, which would corrupt 270// the return value (ie mov %ecx, %eax instead of mov %cl, %al). 271let isCodeGenOnly = 1, Uses = [EFLAGS] in { 272def RETGUARD_JMP_TRAP: I<0, Pseudo, (outs), (ins), "", []>; 273} 274 275let isCodeGenOnly = 1 in { 276def JMP_TRAP: I<0, Pseudo, (outs), (ins), "", []>; 277} 278 279//===----------------------------------------------------------------------===// 280// Alias Instructions 281//===----------------------------------------------------------------------===// 282 283// Alias instruction mapping movr0 to xor. 284// FIXME: remove when we can teach regalloc that xor reg, reg is ok. 285let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1, 286 isPseudo = 1, isMoveImm = 1, AddedComplexity = 10 in 287def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "", 288 [(set GR32:$dst, 0)]>, Sched<[WriteZero]>; 289 290// Other widths can also make use of the 32-bit xor, which may have a smaller 291// encoding and avoid partial register updates. 
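// For example, materializing zero in %rax can simply use the two-byte 32-bit
// form
//   xorl %eax, %eax
// because any write to a 32-bit GPR implicitly clears the upper 32 bits of
// the corresponding 64-bit register.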
292let AddedComplexity = 10 in { 293def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>; 294def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>; 295def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>; 296} 297 298let Predicates = [OptForSize, Not64BitMode], 299 AddedComplexity = 10 in { 300 let SchedRW = [WriteALU] in { 301 // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC, 302 // which only require 3 bytes compared to MOV32ri which requires 5. 303 let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in { 304 def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "", 305 [(set GR32:$dst, 1)]>; 306 def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "", 307 [(set GR32:$dst, -1)]>; 308 } 309 } // SchedRW 310 311 // MOV16ri is 4 bytes, so the instructions above are smaller. 312 def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>; 313 def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>; 314} 315 316let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5, 317 SchedRW = [WriteALU] in { 318// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1. 319def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "", 320 [(set GR32:$dst, i32immSExt8:$src)]>, 321 Requires<[OptForMinSize, NotWin64WithoutFP]>; 322def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "", 323 [(set GR64:$dst, i64immSExt8:$src)]>, 324 Requires<[OptForMinSize, NotWin64WithoutFP]>; 325} 326 327// Materialize i64 constant where top 32-bits are zero. This could theoretically 328// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however 329// that would make it more difficult to rematerialize. 330let isReMaterializable = 1, isAsCheapAsAMove = 1, 331 isPseudo = 1, hasSideEffects = 0, SchedRW = [WriteMove] in 332def MOV32ri64 : I<0, Pseudo, (outs GR64:$dst), (ins i64i32imm:$src), "", []>; 333 334// This 64-bit pseudo-move can be used for both a 64-bit constant that is 335// actually the zero-extension of a 32-bit constant and for labels in the 336// x86-64 small code model. 337def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [imm, X86Wrapper]>; 338 339def : Pat<(i64 mov64imm32:$src), (MOV32ri64 mov64imm32:$src)>; 340 341// Use sbb to materialize carry bit. 342let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in { 343// FIXME: These are pseudo ops that should be replaced with Pat<> patterns. 344// However, Pat<> can't replicate the destination reg into the inputs of the 345// result. 
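// As an illustrative expansion, SETB_C32r with destination $dst becomes
//   sbbl %dst, %dst        # $dst = CF ? -1 : 0
// i.e. the carry flag is materialized as an all-ones/all-zeros value.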
def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",
                 [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",
                 [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU]


def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" in the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type. When
// this happens, it is great. However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETCCr (i8 2))>;

// Patterns to give priority when both inputs are zero so that we don't use
// an immediate for the RHS.
// TODO: Should we use a 32-bit sbb for 8/16 to push the extract_subreg out?
def : Pat<(X86sbb_flag (i8 0), (i8 0), EFLAGS),
          (SBB8rr (EXTRACT_SUBREG (MOV32r0), sub_8bit),
                  (EXTRACT_SUBREG (MOV32r0), sub_8bit))>;
def : Pat<(X86sbb_flag (i16 0), (i16 0), EFLAGS),
          (SBB16rr (EXTRACT_SUBREG (MOV32r0), sub_16bit),
                   (EXTRACT_SUBREG (MOV32r0), sub_16bit))>;
def : Pat<(X86sbb_flag (i32 0), (i32 0), EFLAGS),
          (SBB32rr (MOV32r0), (MOV32r0))>;
def : Pat<(X86sbb_flag (i64 0), (i64 0), EFLAGS),
          (SBB64rr (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit),
                   (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit))>;

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins),
                    "{rep;movsb (%esi), %es:(%edi)|rep movsb es:[edi], [esi]}",
                    [(X86rep_movs i8)]>, REP, AdSize32,
                   Requires<[NotLP64]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsw (%esi), %es:(%edi)|rep movsw es:[edi], [esi]}",
                    [(X86rep_movs i16)]>, REP, AdSize32, OpSize16,
                   Requires<[NotLP64]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsl (%esi), %es:(%edi)|rep movsd es:[edi], [esi]}",
                    [(X86rep_movs i32)]>, REP, AdSize32, OpSize32,
                   Requires<[NotLP64]>;
def REP_MOVSQ_32 : RI<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsq (%esi), %es:(%edi)|rep movsq es:[edi], [esi]}",
                    [(X86rep_movs i64)]>, REP, AdSize32,
                    Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins),
                    "{rep;movsb (%rsi), %es:(%rdi)|rep movsb es:[rdi], [rsi]}",
                    [(X86rep_movs i8)]>, REP, AdSize64,
                   Requires<[IsLP64]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsw (%rsi), %es:(%rdi)|rep movsw es:[rdi], [rsi]}",
                    [(X86rep_movs i16)]>, REP,
AdSize64, OpSize16,
                   Requires<[IsLP64]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsl (%rsi), %es:(%rdi)|rep movsd es:[rdi], [rsi]}",
                    [(X86rep_movs i32)]>, REP, AdSize64, OpSize32,
                   Requires<[IsLP64]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsq (%rsi), %es:(%rdi)|rep movsq es:[rdi], [rsi]}",
                    [(X86rep_movs i64)]>, REP, AdSize64,
                   Requires<[IsLP64]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins),
                      "{rep;stosb %al, %es:(%edi)|rep stosb es:[edi], al}",
                      [(X86rep_stos i8)]>, REP, AdSize32,
                     Requires<[NotLP64]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosw %ax, %es:(%edi)|rep stosw es:[edi], ax}",
                      [(X86rep_stos i16)]>, REP, AdSize32, OpSize16,
                     Requires<[NotLP64]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosl %eax, %es:(%edi)|rep stosd es:[edi], eax}",
                      [(X86rep_stos i32)]>, REP, AdSize32, OpSize32,
                     Requires<[NotLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_32 : RI<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosq %rax, %es:(%edi)|rep stosq es:[edi], rax}",
                      [(X86rep_stos i64)]>, REP, AdSize32,
                      Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins),
                      "{rep;stosb %al, %es:(%rdi)|rep stosb es:[rdi], al}",
                      [(X86rep_stos i8)]>, REP, AdSize64,
                     Requires<[IsLP64]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosw %ax, %es:(%rdi)|rep stosw es:[rdi], ax}",
                      [(X86rep_stos i16)]>, REP, AdSize64, OpSize16,
                     Requires<[IsLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosl %eax, %es:(%rdi)|rep stosd es:[rdi], eax}",
                      [(X86rep_stos i32)]>, REP, AdSize64, OpSize32,
                     Requires<[IsLP64]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosq %rax, %es:(%rdi)|rep stosq es:[rdi], rax}",
                      [(X86rep_stos i64)]>, REP, AdSize64,
                      Requires<[IsLP64]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
let SchedRW = [WriteSystem] in {

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [ESP, SSP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_addr32",
                  [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_base_addr32",
                  [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                  Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers.
RSP is marked as 510// a use to prevent stack-pointer assignments that appear immediately 511// before calls from potentially appearing dead. 512let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11, 513 FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7, 514 ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7, 515 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7, 516 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, 517 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF], 518 usesCustomInserter = 1, Uses = [RSP, SSP] in { 519def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym), 520 "# TLS_addr64", 521 [(X86tlsaddr tls64addr:$sym)]>, 522 Requires<[In64BitMode]>; 523def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym), 524 "# TLS_base_addr64", 525 [(X86tlsbaseaddr tls64baseaddr:$sym)]>, 526 Requires<[In64BitMode]>; 527} 528 529// Darwin TLS Support 530// For i386, the address of the thunk is passed on the stack, on return the 531// address of the variable is in %eax. %ecx is trashed during the function 532// call. All other registers are preserved. 533let Defs = [EAX, ECX, EFLAGS, DF], 534 Uses = [ESP, SSP], 535 usesCustomInserter = 1 in 536def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym), 537 "# TLSCall_32", 538 [(X86TLSCall addr:$sym)]>, 539 Requires<[Not64BitMode]>; 540 541// For x86_64, the address of the thunk is passed in %rdi, but the 542// pseudo directly use the symbol, so do not add an implicit use of 543// %rdi. The lowering will do the right thing with RDI. 544// On return the address of the variable is in %rax. All other 545// registers are preserved. 546let Defs = [RAX, EFLAGS, DF], 547 Uses = [RSP, SSP], 548 usesCustomInserter = 1 in 549def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym), 550 "# TLSCall_64", 551 [(X86TLSCall addr:$sym)]>, 552 Requires<[In64BitMode]>; 553} // SchedRW 554 555//===----------------------------------------------------------------------===// 556// Conditional Move Pseudo Instructions 557 558// CMOV* - Used to implement the SELECT DAG operation. Expanded after 559// instruction selection into a branch sequence. 560multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> { 561 def CMOV#NAME : I<0, Pseudo, 562 (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond), 563 "#CMOV_"#NAME#" PSEUDO!", 564 [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond, 565 EFLAGS)))]>; 566} 567 568let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in { 569 // X86 doesn't have 8-bit conditional moves. Use a customInserter to 570 // emit control flow. An alternative to this is to mark i8 SELECT as Promote, 571 // however that requires promoting the operands, and can induce additional 572 // i8 register pressure. 573 defm _GR8 : CMOVrr_PSEUDO<GR8, i8>; 574 575 let Predicates = [NoCMov] in { 576 defm _GR32 : CMOVrr_PSEUDO<GR32, i32>; 577 defm _GR16 : CMOVrr_PSEUDO<GR16, i16>; 578 } // Predicates = [NoCMov] 579 580 // fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no 581 // SSE1/SSE2. 
582 let Predicates = [FPStackf32] in 583 defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>; 584 585 let Predicates = [FPStackf64] in 586 defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>; 587 588 defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>; 589 590 let Predicates = [NoAVX512] in { 591 defm _FR32 : CMOVrr_PSEUDO<FR32, f32>; 592 defm _FR64 : CMOVrr_PSEUDO<FR64, f64>; 593 } 594 let Predicates = [HasAVX512] in { 595 defm _FR32X : CMOVrr_PSEUDO<FR32X, f32>; 596 defm _FR64X : CMOVrr_PSEUDO<FR64X, f64>; 597 } 598 let Predicates = [NoVLX] in { 599 defm _VR128 : CMOVrr_PSEUDO<VR128, v2i64>; 600 defm _VR256 : CMOVrr_PSEUDO<VR256, v4i64>; 601 } 602 let Predicates = [HasVLX] in { 603 defm _VR128X : CMOVrr_PSEUDO<VR128X, v2i64>; 604 defm _VR256X : CMOVrr_PSEUDO<VR256X, v4i64>; 605 } 606 defm _VR512 : CMOVrr_PSEUDO<VR512, v8i64>; 607 defm _VK2 : CMOVrr_PSEUDO<VK2, v2i1>; 608 defm _VK4 : CMOVrr_PSEUDO<VK4, v4i1>; 609 defm _VK8 : CMOVrr_PSEUDO<VK8, v8i1>; 610 defm _VK16 : CMOVrr_PSEUDO<VK16, v16i1>; 611 defm _VK32 : CMOVrr_PSEUDO<VK32, v32i1>; 612 defm _VK64 : CMOVrr_PSEUDO<VK64, v64i1>; 613} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] 614 615def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)), 616 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>; 617 618let Predicates = [NoVLX] in { 619 def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)), 620 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>; 621 def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)), 622 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>; 623 def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)), 624 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>; 625 def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)), 626 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>; 627 def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)), 628 (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>; 629 630 def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)), 631 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>; 632 def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)), 633 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>; 634 def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)), 635 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>; 636 def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)), 637 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>; 638 def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)), 639 (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>; 640} 641let Predicates = [HasVLX] in { 642 def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)), 643 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>; 644 def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)), 645 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>; 646 def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)), 647 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>; 648 def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)), 649 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>; 650 def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)), 651 (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>; 652 653 def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)), 654 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>; 655 def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)), 656 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>; 657 def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)), 658 (CMOV_VR256X 
VR256X:$t, VR256X:$f, timm:$cond)>; 659 def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)), 660 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>; 661 def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)), 662 (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>; 663} 664 665def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)), 666 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>; 667def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)), 668 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>; 669def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)), 670 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>; 671def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)), 672 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>; 673def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)), 674 (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>; 675 676//===----------------------------------------------------------------------===// 677// Normal-Instructions-With-Lock-Prefix Pseudo Instructions 678//===----------------------------------------------------------------------===// 679 680// FIXME: Use normal instructions and add lock prefix dynamically. 681 682// Memory barriers 683 684let isCodeGenOnly = 1, Defs = [EFLAGS] in 685def OR32mi8Locked : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero), 686 "or{l}\t{$zero, $dst|$dst, $zero}", []>, 687 Requires<[Not64BitMode]>, OpSize32, LOCK, 688 Sched<[WriteALURMW]>; 689 690let hasSideEffects = 1 in 691def Int_MemBarrier : I<0, Pseudo, (outs), (ins), 692 "#MEMBARRIER", 693 [(X86MemBarrier)]>, Sched<[WriteLoad]>; 694 695// RegOpc corresponds to the mr version of the instruction 696// ImmOpc corresponds to the mi version of the instruction 697// ImmOpc8 corresponds to the mi8 version of the instruction 698// ImmMod corresponds to the instruction format of the mi and mi8 versions 699multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8, 700 Format ImmMod, SDNode Op, string mnemonic> { 701let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1, 702 SchedRW = [WriteALURMW] in { 703 704def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4}, 705 RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 }, 706 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2), 707 !strconcat(mnemonic, "{b}\t", 708 "{$src2, $dst|$dst, $src2}"), 709 [(set EFLAGS, (Op addr:$dst, GR8:$src2))]>, LOCK; 710 711def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4}, 712 RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 }, 713 MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2), 714 !strconcat(mnemonic, "{w}\t", 715 "{$src2, $dst|$dst, $src2}"), 716 [(set EFLAGS, (Op addr:$dst, GR16:$src2))]>, 717 OpSize16, LOCK; 718 719def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4}, 720 RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 }, 721 MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2), 722 !strconcat(mnemonic, "{l}\t", 723 "{$src2, $dst|$dst, $src2}"), 724 [(set EFLAGS, (Op addr:$dst, GR32:$src2))]>, 725 OpSize32, LOCK; 726 727def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4}, 728 RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 }, 729 MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), 730 !strconcat(mnemonic, "{q}\t", 731 "{$src2, $dst|$dst, $src2}"), 732 [(set EFLAGS, (Op addr:$dst, GR64:$src2))]>, LOCK; 733 734// NOTE: These are order specific, we want the mi8 forms to be listed 735// first so that they are slightly preferred to the mi forms. 
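// As an illustrative encoding note: "lock addl $1, (%rdi)" can use the
// sign-extended imm8 form (opcode 0x83 here), which needs one immediate byte,
// rather than the full-immediate form (opcode 0x81), which needs four, so
// preferring the mi8 patterns keeps the encoding shorter.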
736def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4}, 737 ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 }, 738 ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2), 739 !strconcat(mnemonic, "{w}\t", 740 "{$src2, $dst|$dst, $src2}"), 741 [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))]>, 742 OpSize16, LOCK; 743 744def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4}, 745 ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 }, 746 ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2), 747 !strconcat(mnemonic, "{l}\t", 748 "{$src2, $dst|$dst, $src2}"), 749 [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))]>, 750 OpSize32, LOCK; 751 752def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4}, 753 ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 }, 754 ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2), 755 !strconcat(mnemonic, "{q}\t", 756 "{$src2, $dst|$dst, $src2}"), 757 [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))]>, 758 LOCK; 759 760def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4}, 761 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 }, 762 ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2), 763 !strconcat(mnemonic, "{b}\t", 764 "{$src2, $dst|$dst, $src2}"), 765 [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))]>, LOCK; 766 767def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4}, 768 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 }, 769 ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2), 770 !strconcat(mnemonic, "{w}\t", 771 "{$src2, $dst|$dst, $src2}"), 772 [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))]>, 773 OpSize16, LOCK; 774 775def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4}, 776 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 }, 777 ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2), 778 !strconcat(mnemonic, "{l}\t", 779 "{$src2, $dst|$dst, $src2}"), 780 [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))]>, 781 OpSize32, LOCK; 782 783def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4}, 784 ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 }, 785 ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2), 786 !strconcat(mnemonic, "{q}\t", 787 "{$src2, $dst|$dst, $src2}"), 788 [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))]>, 789 LOCK; 790} 791 792} 793 794defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">; 795defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">; 796defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">; 797defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">; 798defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">; 799 800def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs), 801 (X86lock_add node:$lhs, node:$rhs), [{ 802 return hasNoCarryFlagUses(SDValue(N, 0)); 803}]>; 804 805def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs), 806 (X86lock_sub node:$lhs, node:$rhs), [{ 807 return hasNoCarryFlagUses(SDValue(N, 0)); 808}]>; 809 810let Predicates = [UseIncDec] in { 811 let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1, 812 SchedRW = [WriteALURMW] in { 813 def LOCK_INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), 814 "inc{b}\t$dst", 815 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>, 816 LOCK; 817 def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), 818 "inc{w}\t$dst", 819 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>, 820 OpSize16, LOCK; 821 def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), 822 "inc{l}\t$dst", 823 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>, 824 OpSize32, 
LOCK; 825 def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), 826 "inc{q}\t$dst", 827 [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>, 828 LOCK; 829 830 def LOCK_DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), 831 "dec{b}\t$dst", 832 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>, 833 LOCK; 834 def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), 835 "dec{w}\t$dst", 836 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>, 837 OpSize16, LOCK; 838 def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), 839 "dec{l}\t$dst", 840 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>, 841 OpSize32, LOCK; 842 def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), 843 "dec{q}\t$dst", 844 [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>, 845 LOCK; 846 } 847 848 // Additional patterns for -1 constant. 849 def : Pat<(X86lock_add addr:$dst, (i8 -1)), (LOCK_DEC8m addr:$dst)>; 850 def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>; 851 def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>; 852 def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>; 853 def : Pat<(X86lock_sub addr:$dst, (i8 -1)), (LOCK_INC8m addr:$dst)>; 854 def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>; 855 def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>; 856 def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>; 857} 858 859// Atomic compare and swap. 860multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic, 861 SDPatternOperator frag, X86MemOperand x86memop> { 862let isCodeGenOnly = 1, usesCustomInserter = 1 in { 863 def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr), 864 !strconcat(mnemonic, "\t$ptr"), 865 [(frag addr:$ptr)]>, TB, LOCK; 866} 867} 868 869multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form, 870 string mnemonic, SDPatternOperator frag> { 871let isCodeGenOnly = 1, SchedRW = [WriteCMPXCHGRMW] in { 872 let Defs = [AL, EFLAGS], Uses = [AL] in 873 def NAME#8 : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap), 874 !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"), 875 [(frag addr:$ptr, GR8:$swap, 1)]>, TB, LOCK; 876 let Defs = [AX, EFLAGS], Uses = [AX] in 877 def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap), 878 !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"), 879 [(frag addr:$ptr, GR16:$swap, 2)]>, TB, OpSize16, LOCK; 880 let Defs = [EAX, EFLAGS], Uses = [EAX] in 881 def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap), 882 !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"), 883 [(frag addr:$ptr, GR32:$swap, 4)]>, TB, OpSize32, LOCK; 884 let Defs = [RAX, EFLAGS], Uses = [RAX] in 885 def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap), 886 !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"), 887 [(frag addr:$ptr, GR64:$swap, 8)]>, TB, LOCK; 888} 889} 890 891let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX], 892 Predicates = [HasCmpxchg8b], SchedRW = [WriteCMPXCHGRMW] in { 893defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b", X86cas8, i64mem>; 894} 895 896// This pseudo must be used when the frame uses RBX as 897// the base pointer. Indeed, in such situation RBX is a reserved 898// register and the register allocator will ignore any use/def of 899// it. In other words, the register will not fix the clobbering of 900// RBX that will happen when setting the arguments for the instrucion. 
//
// Unlike the actual related instruction, we mark that this one
// defines EBX (instead of using EBX).
// The rationale is that we will define RBX during the expansion of
// the pseudo. The argument feeding EBX is ebx_input.
//
// The additional argument, $ebx_save, is a temporary register used to
// save the value of RBX across the actual instruction.
//
// To make sure the register assigned to $ebx_save does not interfere with
// the definition of the actual instruction, we use a definition $dst which
// is tied to $ebx_save. That way, the live-range of $ebx_save spans across
// the instruction and we are sure we will have a valid register to restore
// the value of RBX.
let Defs = [EAX, EDX, EBX, EFLAGS], Uses = [EAX, ECX, EDX],
    Predicates = [HasCmpxchg8b], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$ebx_save = $dst",
    usesCustomInserter = 1 in {
def LCMPXCHG8B_SAVE_EBX :
    I<0, Pseudo, (outs GR32:$dst),
      (ins i64mem:$ptr, GR32:$ebx_input, GR32:$ebx_save),
      !strconcat("cmpxchg8b", "\t$ptr"),
      [(set GR32:$dst, (X86cas8save_ebx addr:$ptr, GR32:$ebx_input,
                                        GR32:$ebx_save))]>;
}


let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
                                 X86cas16, i128mem>, REX_W;
}

// Same as LCMPXCHG8B_SAVE_EBX but for the 16 Bytes variant.
let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst",
    usesCustomInserter = 1 in {
def LCMPXCHG16B_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save),
      !strconcat("cmpxchg16b", "\t$ptr"),
      [(set GR64:$dst, (X86cas16save_rbx addr:$ptr, GR64:$rbx_input,
                                         GR64:$rbx_save))]>;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", X86cas>;

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def NAME#8 : I<opc8, MRMSrcMem, (outs GR8:$dst),
                   (ins GR8:$val, i8mem:$ptr),
                   !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                   [(set GR8:$dst,
                         (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR16:$dst,
                       (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
                    OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR32:$dst,
                       (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
                    OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set
                        GR64:$dst,
                        (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;

/* The following multiclass tries to make sure that in code like
 *  x.store (immediate op x.load(acquire), release)
 * and
 *  x.store (register op
x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late to prevent them from being accidentally reordered in the backend
 * (see the plain MOV patterns for atomic load/store further below).
 */
multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 imm:$src))),
            (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 imm:$src))),
            (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 imm:$src))),
            (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64immSExt32:$src))),
            (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;

  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 GR8:$src))),
            (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 GR16:$src))),
            (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 GR32:$src))),
            (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64 GR64:$src))),
            (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
}
defm : RELEASE_BINOP_MI<"ADD", add>;
defm : RELEASE_BINOP_MI<"AND", and>;
defm : RELEASE_BINOP_MI<"OR", or>;
defm : RELEASE_BINOP_MI<"XOR", xor>;
defm : RELEASE_BINOP_MI<"SUB", sub>;

// Atomic load + floating point patterns.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>(Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseSSE1]>;
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR32X:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSZrm") FR32X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;

  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>(Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseSSE2]>;
  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR64X:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDZrm") FR64X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;
}
defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
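// As an illustrative sketch of what ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>
// above enables (a sketch, not an exhaustive statement of what gets folded):
// IR such as
//   %i = load atomic i32, i32* %p monotonic, align 4
//   %f = bitcast i32 %i to float
//   %r = fadd float %acc, %f
// can select to a single memory-operand instruction like
//   addss (%rdi), %xmm0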
1051 1052multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32, 1053 dag dag64> { 1054 def : Pat<(atomic_store_8 addr:$dst, dag8), 1055 (!cast<Instruction>(Name#8m) addr:$dst)>; 1056 def : Pat<(atomic_store_16 addr:$dst, dag16), 1057 (!cast<Instruction>(Name#16m) addr:$dst)>; 1058 def : Pat<(atomic_store_32 addr:$dst, dag32), 1059 (!cast<Instruction>(Name#32m) addr:$dst)>; 1060 def : Pat<(atomic_store_64 addr:$dst, dag64), 1061 (!cast<Instruction>(Name#64m) addr:$dst)>; 1062} 1063 1064let Predicates = [UseIncDec] in { 1065 defm : RELEASE_UNOP<"INC", 1066 (add (atomic_load_8 addr:$dst), (i8 1)), 1067 (add (atomic_load_16 addr:$dst), (i16 1)), 1068 (add (atomic_load_32 addr:$dst), (i32 1)), 1069 (add (atomic_load_64 addr:$dst), (i64 1))>; 1070 defm : RELEASE_UNOP<"DEC", 1071 (add (atomic_load_8 addr:$dst), (i8 -1)), 1072 (add (atomic_load_16 addr:$dst), (i16 -1)), 1073 (add (atomic_load_32 addr:$dst), (i32 -1)), 1074 (add (atomic_load_64 addr:$dst), (i64 -1))>; 1075} 1076 1077defm : RELEASE_UNOP<"NEG", 1078 (ineg (i8 (atomic_load_8 addr:$dst))), 1079 (ineg (i16 (atomic_load_16 addr:$dst))), 1080 (ineg (i32 (atomic_load_32 addr:$dst))), 1081 (ineg (i64 (atomic_load_64 addr:$dst)))>; 1082defm : RELEASE_UNOP<"NOT", 1083 (not (i8 (atomic_load_8 addr:$dst))), 1084 (not (i16 (atomic_load_16 addr:$dst))), 1085 (not (i32 (atomic_load_32 addr:$dst))), 1086 (not (i64 (atomic_load_64 addr:$dst)))>; 1087 1088def : Pat<(atomic_store_8 addr:$dst, (i8 imm:$src)), 1089 (MOV8mi addr:$dst, imm:$src)>; 1090def : Pat<(atomic_store_16 addr:$dst, (i16 imm:$src)), 1091 (MOV16mi addr:$dst, imm:$src)>; 1092def : Pat<(atomic_store_32 addr:$dst, (i32 imm:$src)), 1093 (MOV32mi addr:$dst, imm:$src)>; 1094def : Pat<(atomic_store_64 addr:$dst, (i64immSExt32:$src)), 1095 (MOV64mi32 addr:$dst, i64immSExt32:$src)>; 1096 1097def : Pat<(atomic_store_8 addr:$dst, GR8:$src), 1098 (MOV8mr addr:$dst, GR8:$src)>; 1099def : Pat<(atomic_store_16 addr:$dst, GR16:$src), 1100 (MOV16mr addr:$dst, GR16:$src)>; 1101def : Pat<(atomic_store_32 addr:$dst, GR32:$src), 1102 (MOV32mr addr:$dst, GR32:$src)>; 1103def : Pat<(atomic_store_64 addr:$dst, GR64:$src), 1104 (MOV64mr addr:$dst, GR64:$src)>; 1105 1106def : Pat<(i8 (atomic_load_8 addr:$src)), (MOV8rm addr:$src)>; 1107def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>; 1108def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>; 1109def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>; 1110 1111// Floating point loads/stores. 
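// As an illustrative example, a release store of an f32 value (legalized to
// an i32 store of a bitcast) is expected to select to a plain scalar store
// such as "movss %xmm0, (%rdi)"; aligned 32/64-bit plain loads and stores are
// naturally atomic on x86, so no special instruction is needed.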
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSmr addr:$dst, FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSZmr addr:$dst, FR32:$src)>, Requires<[HasAVX512]>;

def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (MOVSDmr addr:$dst, FR64:$src)>, Requires<[UseSSE2]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDZmr addr:$dst, FR64:$src)>, Requires<[HasAVX512]>;

def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (MOVSSrm_alt addr:$src)>, Requires<[UseSSE1]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSZrm_alt addr:$src)>, Requires<[HasAVX512]>;

def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (MOVSDrm_alt addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDZrm_alt addr:$src)>, Requires<[HasAVX512]>;

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
// binary size compared to a regular MOV, but it introduces an unnecessary
// load, so is not suitable for regular or optsize functions.
let Predicates = [OptForMinSize] in {
def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
}

// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates.
FIXME: This is really a hack, the 'imm' predicate 1174// for MOV64mi32 should handle this sort of thing. 1175def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst), 1176 (MOV64mi32 addr:$dst, tconstpool:$src)>, 1177 Requires<[NearData, IsNotPIC]>; 1178def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst), 1179 (MOV64mi32 addr:$dst, tjumptable:$src)>, 1180 Requires<[NearData, IsNotPIC]>; 1181def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst), 1182 (MOV64mi32 addr:$dst, tglobaladdr:$src)>, 1183 Requires<[NearData, IsNotPIC]>; 1184def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst), 1185 (MOV64mi32 addr:$dst, texternalsym:$src)>, 1186 Requires<[NearData, IsNotPIC]>; 1187def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst), 1188 (MOV64mi32 addr:$dst, mcsym:$src)>, 1189 Requires<[NearData, IsNotPIC]>; 1190def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst), 1191 (MOV64mi32 addr:$dst, tblockaddress:$src)>, 1192 Requires<[NearData, IsNotPIC]>; 1193 1194def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>; 1195def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>; 1196 1197// Calls 1198 1199// tls has some funny stuff here... 1200// This corresponds to movabs $foo@tpoff, %rax 1201def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)), 1202 (MOV64ri32 tglobaltlsaddr :$dst)>; 1203// This corresponds to add $foo@tpoff, %rax 1204def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)), 1205 (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>; 1206 1207 1208// Direct PC relative function call for small code model. 32-bit displacement 1209// sign extended to 64-bit. 1210def : Pat<(X86call (i64 tglobaladdr:$dst)), 1211 (CALL64pcrel32 tglobaladdr:$dst)>; 1212def : Pat<(X86call (i64 texternalsym:$dst)), 1213 (CALL64pcrel32 texternalsym:$dst)>; 1214 1215// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they 1216// can never use callee-saved registers. That is the purpose of the GR64_TC 1217// register classes. 1218// 1219// The only volatile register that is never used by the calling convention is 1220// %r11. This happens when calling a vararg function with 6 arguments. 1221// 1222// Match an X86tcret that uses less than 7 volatile registers. 1223def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off), 1224 (X86tcret node:$ptr, node:$off), [{ 1225 // X86tcret args: (*chain, ptr, imm, regs..., glue) 1226 unsigned NumRegs = 0; 1227 for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i) 1228 if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6) 1229 return false; 1230 return true; 1231}]>; 1232 1233def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off), 1234 (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>, 1235 Requires<[Not64BitMode, NotUseIndirectThunkCalls]>; 1236 1237// FIXME: This is disabled for 32-bit PIC mode because the global base 1238// register which is part of the address mode may be assigned a 1239// callee-saved register. 
1240def : Pat<(X86tcret (load addr:$dst), imm:$off), 1241 (TCRETURNmi addr:$dst, imm:$off)>, 1242 Requires<[Not64BitMode, IsNotPIC, NotUseIndirectThunkCalls]>; 1243 1244def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off), 1245 (TCRETURNdi tglobaladdr:$dst, imm:$off)>, 1246 Requires<[NotLP64]>; 1247 1248def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off), 1249 (TCRETURNdi texternalsym:$dst, imm:$off)>, 1250 Requires<[NotLP64]>; 1251 1252def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off), 1253 (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>, 1254 Requires<[In64BitMode, NotUseIndirectThunkCalls]>; 1255 1256// Don't fold loads into X86tcret requiring more than 6 regs. 1257// There wouldn't be enough scratch registers for base+index. 1258def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off), 1259 (TCRETURNmi64 addr:$dst, imm:$off)>, 1260 Requires<[In64BitMode, NotUseIndirectThunkCalls]>; 1261 1262def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off), 1263 (INDIRECT_THUNK_TCRETURN64 ptr_rc_tailcall:$dst, imm:$off)>, 1264 Requires<[In64BitMode, UseIndirectThunkCalls]>; 1265 1266def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off), 1267 (INDIRECT_THUNK_TCRETURN32 ptr_rc_tailcall:$dst, imm:$off)>, 1268 Requires<[Not64BitMode, UseIndirectThunkCalls]>; 1269 1270def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off), 1271 (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>, 1272 Requires<[IsLP64]>; 1273 1274def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off), 1275 (TCRETURNdi64 texternalsym:$dst, imm:$off)>, 1276 Requires<[IsLP64]>; 1277 1278// Normal calls, with various flavors of addresses. 1279def : Pat<(X86call (i32 tglobaladdr:$dst)), 1280 (CALLpcrel32 tglobaladdr:$dst)>; 1281def : Pat<(X86call (i32 texternalsym:$dst)), 1282 (CALLpcrel32 texternalsym:$dst)>; 1283def : Pat<(X86call (i32 imm:$dst)), 1284 (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>; 1285 1286// Comparisons. 1287 1288// TEST R,R is smaller than CMP R,0 1289def : Pat<(X86cmp GR8:$src1, 0), 1290 (TEST8rr GR8:$src1, GR8:$src1)>; 1291def : Pat<(X86cmp GR16:$src1, 0), 1292 (TEST16rr GR16:$src1, GR16:$src1)>; 1293def : Pat<(X86cmp GR32:$src1, 0), 1294 (TEST32rr GR32:$src1, GR32:$src1)>; 1295def : Pat<(X86cmp GR64:$src1, 0), 1296 (TEST64rr GR64:$src1, GR64:$src1)>; 1297 1298// zextload bool -> zextload byte 1299// i1 stored in one byte in zero-extended form. 1300// Upper bits cleanup should be executed before Store. 1301def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>; 1302def : Pat<(zextloadi16i1 addr:$src), 1303 (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>; 1304def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>; 1305def : Pat<(zextloadi64i1 addr:$src), 1306 (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>; 1307 1308// extload bool -> extload byte 1309// When extloading from 16-bit and smaller memory locations into 64-bit 1310// registers, use zero-extending loads so that the entire 64-bit register is 1311// defined, avoiding partial-register updates. 
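// For example, an i8 extload whose result feeds an i64 use is expected to be
// selected as a 32-bit zero-extending byte load such as
//   movzbl (%rdi), %eax
// which leaves all 64 bits of %rax well defined.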
// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
// NOTE: The extloadi64i32 pattern needs to be first as it will try to form
// 32-bit loads for 4 byte aligned i8/i16 loads.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;

// If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
// instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
// %ah to the lower byte of a register. By using a MOVSX here we allow a
// post-isel peephole to merge the two MOVSX instructions into one.
def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs),[{
  return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
          N->getOperand(0).getResNo() == 1);
}]>;
def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;

// Any instruction that defines a 32-bit result leaves the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. Any other 32-bit operation will zero-extend
// up to 64 bits. AssertSext/AssertZext aren't saying anything about the upper
// 32 bits, they're probably just qualifying a CopyFromReg.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != ISD::AssertZext;
}]>;
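// For illustration: def32 encodes the x86-64 rule that writing a 32-bit
// register implicitly zeroes bits 63:32. For a hypothetical
//
//   unsigned long widen(unsigned a, unsigned b) { return (unsigned long)(a + b); }
//
// the 32-bit add already produces a zero-extended value, so the zext handled
// by the patterns below is expected to become a bare SUBREG_TO_REG, roughly
// "leal (%rdi,%rsi), %eax" (or addl) with no extra movl/movzx.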
// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
  return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;


// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
// Try this before selecting to OR.
let SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1, isPseudo = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD8rr_DB : I<0, Pseudo, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
                  "", // orb/addb REG, REG
                  [(set GR8:$dst, (or_is_add GR8:$src1, GR8:$src2))]>;
def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "", // orw/addw REG, REG
                   [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "", // orl/addl REG, REG
                   [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "", // orq/addq REG, REG
                   [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable
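// For illustration: a hypothetical computation with provably disjoint bits,
//
//   unsigned tag(unsigned x) { return (x << 4) | 3; }
//
// satisfies or_is_add (the low four bits of x << 4 are known zero), so the OR
// can be selected as an ADD*_DB pseudo and later 3-addressified, e.g. roughly
//   shll $4, %edi
//   leal 3(%rdi), %eax
// avoiding the extra copy a two-address OR would need.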
// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD8ri_DB : I<0, Pseudo,
                  (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
                  "", // orb/addb REG, imm8
                  [(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                   "", // orw/addw REG, imm
                   [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                   "", // orl/addl REG, imm
                   [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;


def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pattern match SUB as XOR
//===----------------------------------------------------------------------===//

// An immediate in the LHS of a subtract can't be encoded in the instruction.
// If there is no possibility of a borrow we can use an XOR instead of a SUB
// to enable the immediate to be folded.
// TODO: Move this to a DAG combine?

def sub_is_xor : PatFrag<(ops node:$lhs, node:$rhs), (sub node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
    KnownBits Known = CurDAG->computeKnownBits(N->getOperand(1));

    // If all possible ones in the RHS are set in the LHS then there can't be
    // a borrow and we can use xor.
    return (~Known.Zero).isSubsetOf(CN->getAPIntValue());
  }

  return false;
}]>;

let AddedComplexity = 5 in {
def : Pat<(sub_is_xor imm:$src2, GR8:$src1),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i16immSExt8:$src2, GR16:$src1),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR16:$src1),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i32immSExt8:$src2, GR32:$src1),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR32:$src1),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i64immSExt8:$src2, GR64:$src1),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub_is_xor i64immSExt32:$src2, GR64:$src1),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
}
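// For illustration: sub_is_xor applies when the constant LHS has a set bit
// everywhere the RHS can be non-zero, e.g. a hypothetical
//
//   unsigned flip(unsigned x) { return 63 - (x & 63); }
//
// can never borrow, so instead of materializing 63 for a SUB the immediate
// may be folded into an XOR, roughly
//   andl $63, %edi
//   xorl $63, %edi
//   movl %edi, %eax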
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;

def : Pat<(X86add_flag_nocf GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1


// AddedComplexity is needed due to the increased complexity on the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in DAGIsel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
                          sub_16bit)>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
} // AddedComplexity = 1
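// For illustration: with these peepholes a hypothetical
//
//   unsigned low16(unsigned x) { return x & 0xffff; }
//
// needs no immediate at all and may be selected as a zero-extending move,
// roughly "movzwl %di, %eax"; likewise x & 0xff becomes movzbl and, on i64,
// x & 0xffffffff becomes a plain movl (implicit zero-extension).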
// Try to use BTS/BTR/BTC for single bit operations on the upper 32-bits.

def BTRXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 0.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingOnes(), SDLoc(N));
}]>;

def BTCBTSXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 1.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingZeros(), SDLoc(N));
}]>;

def BTRMask64 : ImmLeaf<i64, [{
  return !isUInt<32>(Imm) && !isInt<32>(Imm) && isPowerOf2_64(~Imm);
}]>;

def BTCBTSMask64 : ImmLeaf<i64, [{
  return !isInt<32>(Imm) && isPowerOf2_64(Imm);
}]>;

// For now only do this for optsize.
let AddedComplexity = 1, Predicates=[OptForSize] in {
  def : Pat<(and GR64:$src1, BTRMask64:$mask),
            (BTR64ri8 GR64:$src1, (BTRXForm imm:$mask))>;
  def : Pat<(or GR64:$src1, BTCBTSMask64:$mask),
            (BTS64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
  def : Pat<(xor GR64:$src1, BTCBTSMask64:$mask),
            (BTC64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
}


// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
                          sub_16bit)>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
         (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
         (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
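// For illustration: truncation itself costs no instruction, only a
// sub-register read. A hypothetical
//
//   unsigned char low8(unsigned x) { return (unsigned char)x; }
//
// is just an EXTRACT_SUBREG of the incoming GR32, so the expected output is
// roughly "movl %edi, %eax" (only the ABI-required copy, no masking). In
// 32-bit mode the COPY_TO_REGCLASS above additionally restricts the value to
// EAX/EBX/ECX/EDX, the only registers with an addressable low byte there.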
def immff00_ffff : ImmLeaf<i32, [{
  return Imm >= 0xff00 && Imm <= 0xffff;
}]>;

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_16bit)>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
def : Pat<(srl (and_su GR32:$src, immff00_ffff), (i8 8)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;


// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 3);
}]>;

def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 4);
}]>;

def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 5);
}]>;

def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 6);
}]>;
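// For illustration: the shiftMask* fragments let isel drop a mask the
// hardware applies anyway. For a hypothetical
//
//   unsigned shift(unsigned x, unsigned n) { return x << (n & 31); }
//
// the (and n, 31) is recognized as unneeded (SHL already uses the count
// modulo 32), so the expected output is roughly
//   movl %esi, %ecx
//   shll %cl, %edi
//   movl %edi, %eax
// with no separate andl $31.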
// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;

// ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
// 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
// because over-rotating produces the same result. This is noted in the Intel
// docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
// amount could affect EFLAGS results, but that does not matter because we are
// not tracking flags for these nodes.
multiclass MaskedRotateAmountPats<SDNode frag, string name> {
  // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
  def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask8 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask16 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (rot x (and y, 63)) ==> (rot x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}


defm : MaskedRotateAmountPats<rotl, "ROL">;
defm : MaskedRotateAmountPats<rotr, "ROR">;
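// For illustration: the stronger rotate rule can drop even a sub-width mask.
// A hypothetical portable 8-bit rotate,
//
//   unsigned char rol8(unsigned char x, unsigned n) {
//     n &= 7;
//     return (unsigned char)((x << n) | (x >> ((8 - n) & 7)));
//   }
//
// may be recognized as a rotl whose (and n, 7) matches shiftMask8 above, so
// the body can collapse to roughly "rolb %cl, %dil" plus the ABI moves.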
// Double shift amount is implicitly masked.
multiclass MaskedDoubleShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR16:$src1, GR16:$src2, (shiftMask32 CL)),
            (!cast<Instruction>(name # "16rrCL") GR16:$src1, GR16:$src2)>;
  def : Pat<(frag GR32:$src1, GR32:$src2, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rrCL") GR32:$src1, GR32:$src2)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, GR64:$src2, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rrCL") GR64:$src1, GR64:$src2)>;
}

defm : MaskedDoubleShiftAmountPats<X86shld, "SHLD">;
defm : MaskedDoubleShiftAmountPats<X86shrd, "SHRD">;

let Predicates = [HasBMI2] in {
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, (shiftMask32 GR8:$src2)),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, (shiftMask64 GR8:$src2)),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  }

  def : Pat<(sra (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SARX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(sra (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SARX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(srl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHRX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(srl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHRX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(shl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHLX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(shl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHLX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}
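// For illustration: with BMI2 a hypothetical variable shift,
//
//   unsigned long shr(unsigned long x, unsigned n) { return x >> (n & 63); }
//
// can use the three-operand SHRX form selected above, roughly
//   shrxq %rsi, %rdi, %rax
// which avoids tying the count to CL and does not write EFLAGS; the explicit
// mask is again dropped because the hardware masks the count anyway.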
// Use BTR/BTS/BTC for clearing/setting/toggling a bit in a variable location.
multiclass one_bit_patterns<RegisterClass RC, ValueType VT, Instruction BTR,
                            Instruction BTS, Instruction BTC,
                            PatFrag ShiftMask> {
  def : Pat<(and RC:$src1, (rotl -2, GR8:$src2)),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, GR8:$src2)),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, GR8:$src2)),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  // Similar to above, but removing unneeded masking of the shift amount.
  def : Pat<(and RC:$src1, (rotl -2, (ShiftMask GR8:$src2))),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}

defm : one_bit_patterns<GR16, i16, BTR16rr, BTS16rr, BTC16rr, shiftMask16>;
defm : one_bit_patterns<GR32, i32, BTR32rr, BTS32rr, BTC32rr, shiftMask32>;
defm : one_bit_patterns<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;


// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(add GR64:$src1, GR64:$src2), (ADD64rr GR64:$src1, GR64:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8:$src1 , imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(sub GR64:$src1, GR64:$src2), (SUB64rr GR64:$src1, GR64:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;

// sub reg, relocImm
def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt8_su:$src2),
          (SUB64ri8 GR64:$src1, i64relocImmSExt8_su:$src2)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
// Increment/Decrement reg.
// Do not make INC/DEC if it is slow
let Predicates = [UseIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

  def : Pat<(X86add_flag_nocf GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(X86add_flag_nocf GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(X86add_flag_nocf GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(X86add_flag_nocf GR64:$src, -1), (DEC64r GR64:$src)>;
  def : Pat<(X86sub_flag_nocf GR8:$src, -1),  (INC8r GR8:$src)>;
  def : Pat<(X86sub_flag_nocf GR16:$src, -1), (INC16r GR16:$src)>;
  def : Pat<(X86sub_flag_nocf GR32:$src, -1), (INC32r GR32:$src)>;
  def : Pat<(X86sub_flag_nocf GR64:$src, -1), (INC64r GR64:$src)>;
}

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;

// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16 bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
 def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
}
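// For illustration: the HasMOVBE case covers a 16-bit byte swap that reaches
// isel un-legalized, e.g. a hypothetical
//
//   unsigned short swap16(unsigned short x) {
//     return (unsigned short)((x << 8) | (x >> 8));
//   }
//
// where rotating by 8 is equivalent, so roughly "rolw $8, %di" may be emitted
// instead of a movbe/bswap sequence.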