//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((uint32_t)N->getZExtValue(), SDLoc(N));
}]>;


//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction. This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
    SchedRW = [WriteJump] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;


// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;

let SchedRW = [WriteSystem] in {

// x86-64 va_start lowering magic.
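// As a rough sketch (illustrative offsets, not the exact emitted code), the
// custom inserter for VASTART_SAVE_XMM_REGS below tests AL (the SysV ABI's
// count of vector registers used by the call) and conditionally spills the
// XMM argument registers to the register save area:
//     testb %al, %al
//     je    .LnoXMM
//     movaps %xmm0, 48(%rsp)
//     ...
//     movaps %xmm7, 160(%rsp)
//   .LnoXMM: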
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i32imm:$regsavefi, i32imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         timm:$regsavefi,
                                                         timm:$offset),
                               (implicit EFLAGS)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, timm:$size, timm:$mode, timm:$align)),
                  (implicit EFLAGS)]>;


// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

// To protect against stack clash, dynamic allocation should perform a memory
// probe at each page.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def PROBED_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                         "# variable sized alloca with probing",
                         [(set GR32:$dst,
                            (X86ProbedAlloca GR32:$size))]>,
                       Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def PROBED_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                         "# variable sized alloca with probing",
                         [(set GR64:$dst,
                            (X86ProbedAlloca GR64:$size))]>,
                       Requires<[In64BitMode]>;
}

let hasNoSchedulingInfo = 1 in
def STACKALLOC_W_PROBING : I<0, Pseudo, (outs), (ins i64imm:$stacksize),
                             "# fixed size alloca with probing",
                             []>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets. These calls are needed to probe the stack when allocating more than
// 4k bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects it has (compared to an ordinary call), such as the stack pointer
// change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR32:$size)]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR64:$size)]>,
                    Requires<[In64BitMode]>;
} // SchedRW

// These instructions XOR the frame pointer into a GPR. They are used in some
// stack protection schemes. These are post-RA pseudos because we only know the
// frame register after register allocation.
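// For example (illustrative): in an MSVC-style /GS scheme the security cookie
// is XORed with the frame pointer. Once the frame register is known to be,
// say, %rbp, XOR64_FP applied to %rax prints as:
//     xorq %rbp, %rax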
let Constraints = "$src = $dst", isMoveImm = 1, isPseudo = 1, Defs = [EFLAGS] in {
  def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
                   "xorl\t$$FP, $src", []>,
                 Requires<[NotLP64]>, Sched<[WriteALU]>;
  def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
                   "xorq\t$$FP, $src", []>,
                 Requires<[In64BitMode]>, Sched<[WriteALU]>;
}

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>, Sched<[WriteJumpLd]>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                      "ret\t#eh_return, addr: $addr",
                      [(X86ehret GR64:$addr)]>, Sched<[WriteJumpLd]>;

}

let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1 in {
  def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>;

  // CATCHRET needs a custom inserter for SEH.
  let usesCustomInserter = 1 in
    def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
                     "# CATCHRET",
                     [(catchret bb:$dst, bb:$from)]>;
}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
let isPseudo = 1, SchedRW = [WriteSystem] in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                      "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                         "#SEH_StackAlloc $size", []>;
  def SEH_StackAlign : I<0, Pseudo, (outs), (ins i32imm:$align),
                         "#SEH_StackAlign $align", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                       "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                        "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                          "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                       "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower. We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1, SchedRW = [WriteJumpLd] in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>;

// This instruction is lowered to a RET followed by a MOV. The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, isMoveImm = 1, AddedComplexity = 10 in
def MOV32r0  : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)]>, Sched<[WriteZero]>;

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
let AddedComplexity = 10 in {
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
}

let Predicates = [OptForSize, Not64BitMode],
    AddedComplexity = 10 in {
  let SchedRW = [WriteALU] in {
  // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
  // which only require 3 bytes compared to MOV32ri which requires 5.
  let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
    def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                    [(set GR32:$dst, 1)]>;
    def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                     [(set GR32:$dst, -1)]>;
  }
  } // SchedRW

  // MOV16ri is 4 bytes, so the instructions above are smaller.
  def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
  def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}

let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
    SchedRW = [WriteALU] in {
// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
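// A sketch of the usual expansion these pseudos get after isel (assuming the
// push/pop lowering):
//     pushl $-1                // 2 bytes (6A FF)
//     popl  %eax               // 1 byte  (58)
// i.e. 3 bytes vs. 5 for movl $-1, %eax. The stack traffic is also why the
// NotWin64WithoutFP predicate below keeps them off Win64 frames that have no
// frame pointer.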
def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
                       [(set GR32:$dst, i32immSExt8:$src)]>,
                     Requires<[OptForMinSize, NotWin64WithoutFP]>;
def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
                       [(set GR64:$dst, i64immSExt8:$src)]>,
                     Requires<[OptForMinSize, NotWin64WithoutFP]>;
}

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, SchedRW = [WriteMove] in
def MOV32ri64 : I<0, Pseudo, (outs GR64:$dst), (ins i64i32imm:$src), "",
                  [(set GR64:$dst, i64immZExt32:$src)]>;

// This 64-bit pseudo-move can also be used for labels in the x86-64 small code
// model.
def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [X86Wrapper]>;
def : Pat<(i64 mov64imm32:$src), (MOV32ri64 mov64imm32:$src)>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteADC],
    hasSideEffects = 0 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "", []>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "", []>;
} // Uses, Defs, isPseudo, SchedRW, hasSideEffects

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins),
                     "{rep;movsb (%esi), %es:(%edi)|rep movsb es:[edi], [esi]}",
                     [(X86rep_movs i8)]>, REP, AdSize32,
                   Requires<[NotLP64]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsw (%esi), %es:(%edi)|rep movsw es:[edi], [esi]}",
                     [(X86rep_movs i16)]>, REP, AdSize32, OpSize16,
                   Requires<[NotLP64]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsl (%esi), %es:(%edi)|rep movsd es:[edi], [esi]}",
                     [(X86rep_movs i32)]>, REP, AdSize32, OpSize32,
                   Requires<[NotLP64]>;
def REP_MOVSQ_32 : RI<0xA5, RawFrm, (outs), (ins),
                      "{rep;movsq (%esi), %es:(%edi)|rep movsq es:[edi], [esi]}",
                      [(X86rep_movs i64)]>, REP, AdSize32,
                    Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins),
                     "{rep;movsb (%rsi), %es:(%rdi)|rep movsb es:[rdi], [rsi]}",
                     [(X86rep_movs i8)]>, REP, AdSize64,
                   Requires<[IsLP64]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsw (%rsi), %es:(%rdi)|rep movsw es:[rdi], [rsi]}",
                     [(X86rep_movs i16)]>, REP, AdSize64, OpSize16,
                   Requires<[IsLP64]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsl (%rsi), %es:(%rdi)|rep movsd es:[rdi], [rsi]}",
                     [(X86rep_movs i32)]>, REP, AdSize64, OpSize32,
                   Requires<[IsLP64]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins),
                      "{rep;movsq (%rsi), %es:(%rdi)|rep movsq es:[rdi], [rsi]}",
                      [(X86rep_movs i64)]>, REP, AdSize64,
                    Requires<[IsLP64]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
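// Semantically, "rep stos" is a memset: it stores AL/AX/EAX/RAX at
// ES:[(E/R)DI], (E/R)CX times, e.g. (illustrative):
//     rep stosb                // roughly memset(edi, al, ecx)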
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins),
                       "{rep;stosb %al, %es:(%edi)|rep stosb es:[edi], al}",
                       [(X86rep_stos i8)]>, REP, AdSize32,
                     Requires<[NotLP64]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosw %ax, %es:(%edi)|rep stosw es:[edi], ax}",
                       [(X86rep_stos i16)]>, REP, AdSize32, OpSize16,
                     Requires<[NotLP64]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosl %eax, %es:(%edi)|rep stosd es:[edi], eax}",
                       [(X86rep_stos i32)]>, REP, AdSize32, OpSize32,
                     Requires<[NotLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_32 : RI<0xAB, RawFrm, (outs), (ins),
                        "{rep;stosq %rax, %es:(%edi)|rep stosq es:[edi], rax}",
                        [(X86rep_stos i64)]>, REP, AdSize32,
                      Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins),
                       "{rep;stosb %al, %es:(%rdi)|rep stosb es:[rdi], al}",
                       [(X86rep_stos i8)]>, REP, AdSize64,
                     Requires<[IsLP64]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosw %ax, %es:(%rdi)|rep stosw es:[rdi], ax}",
                       [(X86rep_stos i16)]>, REP, AdSize64, OpSize16,
                     Requires<[IsLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosl %eax, %es:(%rdi)|rep stosd es:[rdi], eax}",
                       [(X86rep_stos i32)]>, REP, AdSize64, OpSize32,
                     Requires<[IsLP64]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins),
                        "{rep;stosq %rax, %es:(%rdi)|rep stosq es:[rdi], rax}",
                        [(X86rep_stos i64)]>, REP, AdSize64,
                      Requires<[IsLP64]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
let SchedRW = [WriteSystem] in {

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [ESP, SSP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addr32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                      Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
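// For reference, TLS_addr64 below is lowered to the canonical ELF
// general-dynamic sequence (roughly; the exact prefix padding matters to
// linker relaxation):
//     data16 leaq x@tlsgd(%rip), %rdi
//     data16 data16 rex64 callq __tls_get_addr@PLT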
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [RSP, SSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                        "# TLS_base_addr64",
                        [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                      Requires<[In64BitMode]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack; on return the
// address of the variable is in %eax. %ecx is trashed during the function
// call. All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS, DF],
    Uses = [ESP, SSP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, but the
// pseudo directly uses the symbol, so do not add an implicit use of
// %rdi. The lowering will do the right thing with RDI.
// On return the address of the variable is in %rax. All other
// registers are preserved.
let Defs = [RAX, EFLAGS, DF],
    Uses = [RSP, SSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation. Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond,
                                                EFLAGS)))]>;
}

let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a customInserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
  // however that requires promoting the operands, and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMov] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMov]

  // fcmov doesn't handle all possible EFLAGS, so provide a fallback if there
  // is no SSE1/SSE2.
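  // All of these pseudos are expanded by the custom inserter into a branch
  // diamond, roughly (illustrative):
  //     jcc   .Ltrue            // $cond taken: result is $t
  //     // fall through: result is $f
  //   .Ltrue:
  // with a PHI in the join block merging the two values.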
  let Predicates = [FPStackf32] in
    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

  let Predicates = [FPStackf64] in
    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

  let Predicates = [HasMMX] in
    defm _VR64 : CMOVrr_PSEUDO<VR64, x86mmx>;

  let Predicates = [HasSSE1,NoAVX512] in
    defm _FR32 : CMOVrr_PSEUDO<FR32, f32>;
  let Predicates = [HasSSE2,NoAVX512] in
    defm _FR64 : CMOVrr_PSEUDO<FR64, f64>;
  let Predicates = [HasAVX512] in {
    defm _FR32X : CMOVrr_PSEUDO<FR32X, f32>;
    defm _FR64X : CMOVrr_PSEUDO<FR64X, f64>;
  }
  let Predicates = [NoVLX] in {
    defm _VR128 : CMOVrr_PSEUDO<VR128, v2i64>;
    defm _VR256 : CMOVrr_PSEUDO<VR256, v4i64>;
  }
  let Predicates = [HasVLX] in {
    defm _VR128X : CMOVrr_PSEUDO<VR128X, v2i64>;
    defm _VR256X : CMOVrr_PSEUDO<VR256X, v4i64>;
  }
  defm _VR512 : CMOVrr_PSEUDO<VR512, v8i64>;
  defm _VK1 : CMOVrr_PSEUDO<VK1, v1i1>;
  defm _VK2 : CMOVrr_PSEUDO<VK2, v2i1>;
  defm _VK4 : CMOVrr_PSEUDO<VK4, v4i1>;
  defm _VK8 : CMOVrr_PSEUDO<VK8, v8i1>;
  defm _VK16 : CMOVrr_PSEUDO<VK16, v16i1>;
  defm _VK32 : CMOVrr_PSEUDO<VK32, v32i1>;
  defm _VK64 : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]

def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
          (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

let Predicates = [NoVLX] in {
  def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
}
let Predicates = [HasVLX] in {
  def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f,
                            timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
}

def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mi8Locked : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero),
                        "or{l}\t{$zero, $dst|$dst, $zero}", []>,
                    Requires<[Not64BitMode]>, OpSize32, LOCK,
                    Sched<[WriteALURMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, SDNode Op, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALURMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [(set EFLAGS, (Op addr:$dst, GR8:$src2))]>, LOCK;

def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR16:$src2))]>,
                  OpSize16, LOCK;

def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR32:$src2))]>,
                  OpSize32, LOCK;

def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, GR64:$src2))]>, LOCK;

// NOTE: These are order specific, we want the mi8 forms
// to be listed first so that they are slightly preferred to the mi forms.
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))]>,
                     OpSize16, LOCK;

def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))]>,
                     OpSize32, LOCK;

def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))]>,
                      LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))]>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))]>,
                     OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))]>,
                     OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))]>,
                         LOCK;
}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;

def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_add node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_sub node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

let Predicates = [UseIncDec] in {
  let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                        "inc{b}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                        "inc{w}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                        "inc{l}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                         "inc{q}\t$dst",
                         [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>,
                         LOCK;

    def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                        "dec{b}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                        "dec{w}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                        "dec{l}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                         "dec{q}\t$dst",
                         [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>,
                         LOCK;
  }

  // Additional patterns for -1 constant.
  def : Pat<(X86lock_add addr:$dst, (i8  -1)), (LOCK_DEC8m  addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i8  -1)), (LOCK_INC8m  addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>;
}

// Atomic compare and swap.
multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag> {
let isCodeGenOnly = 1, SchedRW = [WriteCMPXCHGRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)]>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)]>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    Predicates = [HasCmpxchg8b], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, usesCustomInserter = 1 in {
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)]>, TB, LOCK;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
                     "cmpxchg16b\t$ptr",
                     []>, TB, LOCK;
}

// This pseudo must be used when the frame uses RBX as
// the base pointer. Indeed, in such a situation RBX is a reserved
// register and the register allocator will ignore any use/def of
// it.
// In other words, the register allocator will not fix the clobbering of
// RBX that happens when setting the arguments for the instruction.
//
// Unlike the actual related instruction, we mark that this one
// defines RBX (instead of using RBX).
// The rationale is that we will define RBX during the expansion of
// the pseudo. The argument feeding RBX is rbx_input.
//
// The additional argument, $rbx_save, is a temporary register used to
// save the value of RBX across the actual instruction.
//
// To make sure the register assigned to $rbx_save does not interfere with
// the definition of the actual instruction, we use a definition $dst which
// is tied to $rbx_save. That way, the live-range of $rbx_save spans across
// the instruction and we are sure we will have a valid register to restore
// the value of RBX.
let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1,
    mayLoad = 1, mayStore = 1, hasSideEffects = 0,
    Constraints = "$rbx_save = $dst" in {
def LCMPXCHG16B_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save), "", []>;
}

// Pseudo instruction that doesn't read/write RBX. Will be turned into either
// LCMPXCHG16B_SAVE_RBX or LCMPXCHG16B via a custom inserter.
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1,
    mayLoad = 1, mayStore = 1, hasSideEffects = 0,
    usesCustomInserter = 1 in {
def LCMPXCHG16B_NO_RBX :
    I<0, Pseudo, (outs), (ins i128mem:$ptr, GR64:$rbx_input), "",
      [(X86cas16 addr:$ptr, GR64:$rbx_input)]>;
}

// This pseudo must be used when the frame uses RBX/EBX as
// the base pointer.
// cf. the comment for LCMPXCHG16B_SAVE_RBX.
let Defs = [EBX], Uses = [ECX, EAX],
    Predicates = [HasMWAITX], SchedRW = [WriteSystem],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst" in {
def MWAITX_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins GR32:$ebx_input, GR64:$rbx_save),
      "mwaitx",
      []>;
}

// Pseudo mwaitx instruction to use for custom insertion.
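// e.g. (illustrative) the IR
//     call void @llvm.x86.mwaitx(i32 %ecx, i32 %eax, i32 %ebx)
// selects to this pseudo; the inserter then emits MWAITX_SAVE_RBX only when
// RBX is the frame's base pointer, and a plain mwaitx otherwise.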
let Predicates = [HasMWAITX], SchedRW = [WriteSystem],
    isCodeGenOnly = 1, isPseudo = 1,
    usesCustomInserter = 1 in {
def MWAITX :
    I<0, Pseudo, (outs), (ins GR32:$ecx, GR32:$eax, GR32:$ebx),
      "mwaitx",
      [(int_x86_mwaitx GR32:$ecx, GR32:$eax, GR32:$ebx)]>;
}


defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", X86cas>;

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR16:$dst,
                       (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
                    OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR32:$dst,
                       (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
                    OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set
                        GR64:$dst,
                        (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;

/* The following multiclass tries to make sure that in code like
 *  x.store (immediate op x.load(acquire), release)
 * and
 *  x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV
 * instructions extremely late, to prevent them from being accidentally
 * reordered in the backend (see the atomic load/store MOV patterns further
 * below).
 */
multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 imm:$src))),
            (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 imm:$src))),
            (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 imm:$src))),
            (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64immSExt32:$src))),
            (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;

  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 GR8:$src))),
            (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 GR16:$src))),
            (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 GR32:$src))),
            (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64 GR64:$src))),
            (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
}
defm : RELEASE_BINOP_MI<"ADD", add>;
defm : RELEASE_BINOP_MI<"AND", and>;
defm : RELEASE_BINOP_MI<"OR",  or>;
defm : RELEASE_BINOP_MI<"XOR", xor>;
defm : RELEASE_BINOP_MI<"SUB", sub>;

// Atomic load + floating point patterns.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>(Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseSSE1]>;
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR32X:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSZrm") FR32X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;

  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>(Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseSSE2]>;
  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR64X:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDZrm") FR64X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;
}
defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
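// For example (an illustrative IR snippet), given
//     %v = load atomic i32, i32* %p monotonic, align 4
//     %f = bitcast i32 %v to float
//     %r = fadd float %x, %f
// the patterns above fold the load and select
//     addss (%rdi), %xmm0
// instead of bouncing the value through a GPR.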

multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
                        dag dag64> {
  def : Pat<(atomic_store_8 addr:$dst, dag8),
            (!cast<Instruction>(Name#8m) addr:$dst)>;
  def : Pat<(atomic_store_16 addr:$dst, dag16),
            (!cast<Instruction>(Name#16m) addr:$dst)>;
  def : Pat<(atomic_store_32 addr:$dst, dag32),
            (!cast<Instruction>(Name#32m) addr:$dst)>;
  def : Pat<(atomic_store_64 addr:$dst, dag64),
            (!cast<Instruction>(Name#64m) addr:$dst)>;
}

let Predicates = [UseIncDec] in {
  defm : RELEASE_UNOP<"INC",
                      (add (atomic_load_8  addr:$dst), (i8 1)),
                      (add (atomic_load_16 addr:$dst), (i16 1)),
                      (add (atomic_load_32 addr:$dst), (i32 1)),
                      (add (atomic_load_64 addr:$dst), (i64 1))>;
  defm : RELEASE_UNOP<"DEC",
                      (add (atomic_load_8  addr:$dst), (i8 -1)),
                      (add (atomic_load_16 addr:$dst), (i16 -1)),
                      (add (atomic_load_32 addr:$dst), (i32 -1)),
                      (add (atomic_load_64 addr:$dst), (i64 -1))>;
}

defm : RELEASE_UNOP<"NEG",
                    (ineg (i8 (atomic_load_8  addr:$dst))),
                    (ineg (i16 (atomic_load_16 addr:$dst))),
                    (ineg (i32 (atomic_load_32 addr:$dst))),
                    (ineg (i64 (atomic_load_64 addr:$dst)))>;
defm : RELEASE_UNOP<"NOT",
                    (not (i8 (atomic_load_8  addr:$dst))),
                    (not (i16 (atomic_load_16 addr:$dst))),
                    (not (i32 (atomic_load_32 addr:$dst))),
                    (not (i64 (atomic_load_64 addr:$dst)))>;

def : Pat<(atomic_store_8 addr:$dst, (i8 imm:$src)),
          (MOV8mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_16 addr:$dst, (i16 imm:$src)),
          (MOV16mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_32 addr:$dst, (i32 imm:$src)),
          (MOV32mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_64 addr:$dst, (i64immSExt32:$src)),
          (MOV64mi32 addr:$dst, i64immSExt32:$src)>;

def : Pat<(atomic_store_8 addr:$dst, GR8:$src),
          (MOV8mr addr:$dst, GR8:$src)>;
def : Pat<(atomic_store_16 addr:$dst, GR16:$src),
          (MOV16mr addr:$dst, GR16:$src)>;
def : Pat<(atomic_store_32 addr:$dst, GR32:$src),
          (MOV32mr addr:$dst, GR32:$src)>;
def : Pat<(atomic_store_64 addr:$dst, GR64:$src),
          (MOV64mr addr:$dst, GR64:$src)>;

def : Pat<(i8  (atomic_load_8 addr:$src)),  (MOV8rm addr:$src)>;
def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>;
def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>;
def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>;

// Floating point loads/stores.
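// e.g. (illustrative) an atomic f32 load below is selected as a single
// bit-preserving scalar move:
//     movss (%rdi), %xmm0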
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSmr addr:$dst, FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSZmr addr:$dst, FR32:$src)>, Requires<[HasAVX512]>;

def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (MOVSDmr addr:$dst, FR64:$src)>, Requires<[UseSSE2]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDZmr addr:$dst, FR64:$src)>, Requires<[HasAVX512]>;

def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (MOVSSrm_alt addr:$src)>, Requires<[UseSSE1]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSZrm_alt addr:$src)>, Requires<[HasAVX512]>;

def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (MOVSDrm_alt addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDZrm_alt addr:$src)>, Requires<[HasAVX512]>;

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
// binary size compared to a regular MOV, but it introduces an unnecessary
// load, so is not suitable for regular or optsize functions.
let Predicates = [OptForMinSize] in {
def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
}

// In the kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have the small code model and -static mode, it is safe to store
// global addresses directly as immediates.
// FIXME: This is really a hack; the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsNotPIC]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// Calls

// TLS has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
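// Where it does apply, the folded form tail-calls straight through memory,
// e.g. (illustrative):
//     jmpl *8(%ebp)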
def : Pat<(X86tcret (load addr:$dst), timm:$off),
          (TCRETURNmi addr:$dst, timm:$off)>,
          Requires<[Not64BitMode, IsNotPIC, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), timm:$off),
          (TCRETURNdi tglobaladdr:$dst, timm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), timm:$off),
          (TCRETURNdi texternalsym:$dst, timm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), timm:$off),
          (TCRETURNmi64 addr:$dst, timm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (INDIRECT_THUNK_TCRETURN64 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[In64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (INDIRECT_THUNK_TCRETURN32 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[Not64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), timm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, timm:$off)>,
          Requires<[IsLP64]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), timm:$off),
          (TCRETURNdi64 texternalsym:$dst, timm:$off)>,
          Requires<[IsLP64]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;

// zextload bool -> zextload byte
// An i1 is stored in one byte in zero-extended form; the upper bits were
// cleaned up before the store, so a plain byte load suffices.
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.
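// For example (illustrative), an i8 extload feeding an i64 use becomes
//     movzbl (%rdi), %eax      // writing %eax implicitly zeroes bits 63:32
// which the 64-bit patterns below wrap in SUBREG_TO_REG.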

def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
// NOTE: The extloadi64i32 pattern needs to be first as it will try to form
// 32-bit loads for 4 byte aligned i8/i16 loads.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;

// If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
// instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
// %ah to the lower byte of a register. By using a MOVSX here we allow a
// post-isel peephole to merge the two MOVSX instructions into one.
def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs),[{
  return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
          N->getOperand(0).getResNo() == 1);
}]>;
def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;

// Any instruction that defines a 32-bit result zero-extends the value up to
// 64 bits, with a few exceptions where the high half is unknown: a truncate
// can be lowered to EXTRACT_SUBREG, and a CopyFromReg may be copying from a
// truncate. AssertSext/AssertZext aren't saying anything about the upper
// 32 bits; they're probably just qualifying a CopyFromReg.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != ISD::AssertZext;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
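// For example (an illustrative IR snippet), in
//     %a = add i32 %x, %y
//     %z = zext i32 %a to i64
// the 32-bit add has already zeroed bits 63:32 of the destination, so the
// patterns below select just the addl and wrap it in SUBREG_TO_REG instead
// of emitting a separate zero-extending move.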
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
  return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;


// (or x1, x2) -> (add x1, x2) if the two operands are known not to share bits.
// Try this before selecting as an OR.
let SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1, isPseudo = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD8rr_DB  : I<0, Pseudo, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
                   "", // orb/addb REG, REG
                   [(set GR8:$dst, (or_is_add GR8:$src1, GR8:$src2))]>;
def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "", // orw/addw REG, REG
                   [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "", // orl/addl REG, REG
                   [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "", // orq/addq REG, REG
                   [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.
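// For instance (an illustrative sketch): matching (or GR32:$x, 4) when bit 2
// of $x is known zero selects ADD32ri8_DB, whose imm8 encoding, e.g.
//   addl $4, %ecx        // 83 C1 04, 3 bytes
// is half the size of the 6-byte imm32 form (81 C1 04 00 00 00), which is
// why the ri8 forms should win ties.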

def ADD8ri_DB   : I<0, Pseudo,
                    (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
                    "", // orb/addb REG, imm8
                    [(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;


def ADD64ri8_DB  : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                     "", // orq/addq REG, imm8
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pattern match SUB as XOR
//===----------------------------------------------------------------------===//

// An immediate in the LHS of a subtract can't be encoded in the instruction.
// If there is no possibility of a borrow we can use an XOR instead of a SUB
// to enable the immediate to be folded.
// TODO: Move this to a DAG combine?

def sub_is_xor : PatFrag<(ops node:$lhs, node:$rhs), (sub node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
    KnownBits Known = CurDAG->computeKnownBits(N->getOperand(1));

    // If all possible ones in the RHS are set in the LHS then there can't be
    // a borrow and we can use xor.
    return (~Known.Zero).isSubsetOf(CN->getAPIntValue());
  }

  return false;
}]>;

let AddedComplexity = 5 in {
def : Pat<(sub_is_xor imm:$src2, GR8:$src1),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i16immSExt8:$src2, GR16:$src1),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR16:$src1),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i32immSExt8:$src2, GR32:$src1),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR32:$src1),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i64immSExt8:$src2, GR64:$src1),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub_is_xor i64immSExt32:$src2, GR64:$src1),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
}

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
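// For example (illustrative): for a 16-bit add,
//   addw $128, %cx       // 66 81 C1 80 00, imm16
//   subw $-128, %cx      // 66 83 E9 80,    imm8
// compute the same value, and the sub form saves a byte.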
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;

def : Pat<(X86add_flag_nocf GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1


// AddedComplexity is needed due to the increased complexity of the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in the DAGIsel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
                          sub_16bit)>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
} // AddedComplexity = 1


// Try to use BTS/BTR/BTC for single bit operations on the upper 32-bits.
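// For example (illustrative): toggling bit 40 of %rax with XOR would require
//   movabsq $0x10000000000, %rcx
//   xorq    %rcx, %rax
// because the mask does not fit a sign-extended imm32, whereas
//   btcq    $40, %rax
// is a single short instruction; this is why the patterns below are enabled
// only for optsize.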

def BTRXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 0.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingOnes(), SDLoc(N));
}]>;

def BTCBTSXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 1.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingZeros(), SDLoc(N));
}]>;

def BTRMask64 : ImmLeaf<i64, [{
  return !isUInt<32>(Imm) && !isInt<32>(Imm) && isPowerOf2_64(~Imm);
}]>;

def BTCBTSMask64 : ImmLeaf<i64, [{
  return !isInt<32>(Imm) && isPowerOf2_64(Imm);
}]>;

// For now only do this for optsize.
let AddedComplexity = 1, Predicates=[OptForSize] in {
  def : Pat<(and GR64:$src1, BTRMask64:$mask),
            (BTR64ri8 GR64:$src1, (BTRXForm imm:$mask))>;
  def : Pat<(or GR64:$src1, BTCBTSMask64:$mask),
            (BTS64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
  def : Pat<(xor GR64:$src1, BTCBTSMask64:$mask),
            (BTC64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
}


// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
                          sub_16bit)>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
         (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
         (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;

def immff00_ffff : ImmLeaf<i32, [{
  return Imm >= 0xff00 && Imm <= 0xffff;
}]>;

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_16bit)>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
def : Pat<(srl (and_su GR32:$src, immff00_ffff), (i8 8)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// Special pattern to catch the last step of __builtin_parity handling. Our
// goal is to use an xor of an h-register with the corresponding l-register.
// The above patterns would handle this on non 64-bit targets, but for 64-bit
// we need to be more careful. We're using a NOREX instruction here in case
// register allocation fails to keep the two registers together. So we need to
// make sure we can't accidentally mix R8-R15 with an h-register.
def : Pat<(X86xor_flag (i8 (trunc GR32:$src)),
                       (i8 (trunc (srl_su GR32:$src, (i8 8))))),
          (XOR8rr_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit),
                        (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;

// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
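// A sketch of why the ADD form is preferred: shll $1, %eax and
// addl %eax, %eax compute the same value, but ADD is convertible to
// three-address form, so if the source must stay live it can become
//   leal (%rax,%rax), %ecx
// with no extra copy.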
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 3);
}]>;

def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 4);
}]>;

def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 5);
}]>;

def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 6);
}]>;


// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;

// ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
// 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
// because over-rotating produces the same result. This is noted in the Intel
// docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
// amount could affect EFLAGS results, but that does not matter because we are
// not tracking flags for these nodes.
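// For example (illustrative): an i8 rotate by (amt & 7) can drop the mask
// entirely, e.g.
//   rolb %cl, %dil       // hardware already reduces the count mod 8
// whereas an i8 shift may only drop a mask of (amt & 31): SHL masks the
// count to 5 bits but does not reduce it modulo the operand width.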
multiclass MaskedRotateAmountPats<SDNode frag, string name> {
  // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
  def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask8 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask16 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (rot x (and y, 63)) ==> (rot x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}


defm : MaskedRotateAmountPats<rotl, "ROL">;
defm : MaskedRotateAmountPats<rotr, "ROR">;

// Double "funnel" shift amount is implicitly masked.
// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y) (NOTE: modulo32)
def : Pat<(X86fshl GR16:$src1, GR16:$src2, (shiftMask32 CL)),
          (SHLD16rrCL GR16:$src1, GR16:$src2)>;
def : Pat<(X86fshr GR16:$src2, GR16:$src1, (shiftMask32 CL)),
          (SHRD16rrCL GR16:$src1, GR16:$src2)>;

// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR32:$src1, GR32:$src2, (shiftMask32 CL)),
          (SHLD32rrCL GR32:$src1, GR32:$src2)>;
def : Pat<(fshr GR32:$src2, GR32:$src1, (shiftMask32 CL)),
          (SHRD32rrCL GR32:$src1, GR32:$src2)>;

// (fshl/fshr x (and y, 63)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR64:$src1, GR64:$src2, (shiftMask64 CL)),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;
def : Pat<(fshr GR64:$src2, GR64:$src1, (shiftMask64 CL)),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

let Predicates = [HasBMI2] in {
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, (shiftMask32 GR8:$src2)),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, (shiftMask64 GR8:$src2)),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  }

  def : Pat<(sra (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SARX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(sra (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SARX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(srl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHRX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(srl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHRX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(shl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHLX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(shl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHLX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}

// Use BTR/BTS/BTC for clearing/setting/toggling a bit in a variable location.
multiclass one_bit_patterns<RegisterClass RC, ValueType VT, Instruction BTR,
                            Instruction BTS, Instruction BTC,
                            PatFrag ShiftMask> {
  def : Pat<(and RC:$src1, (rotl -2, GR8:$src2)),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, GR8:$src2)),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, GR8:$src2)),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  // Similar to above, but removing unneeded masking of the shift amount.
  def : Pat<(and RC:$src1, (rotl -2, (ShiftMask GR8:$src2))),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}

defm : one_bit_patterns<GR16, i16, BTR16rr, BTS16rr, BTC16rr, shiftMask16>;
defm : one_bit_patterns<GR32, i32, BTR32rr, BTS32rr, BTC32rr, shiftMask32>;
defm : one_bit_patterns<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(add GR64:$src1, GR64:$src2), (ADD64rr GR64:$src1, GR64:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(sub GR64:$src1, GR64:$src2), (SUB64rr GR64:$src1, GR64:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
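// Note (an illustrative sketch): the rmi forms above select the
// three-operand IMUL, so (mul (loadi32 addr:$p), 100) can become a single
//   imull $100, (%rdi), %eax
// folding both the load and the immediate while writing a fresh register.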
// Increment/Decrement reg.
// Do not form INC/DEC if they are slow on the target.
let Predicates = [UseIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

  def : Pat<(X86add_flag_nocf GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(X86add_flag_nocf GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(X86add_flag_nocf GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(X86add_flag_nocf GR64:$src, -1), (DEC64r GR64:$src)>;
  def : Pat<(X86sub_flag_nocf GR8:$src, -1),  (INC8r GR8:$src)>;
  def : Pat<(X86sub_flag_nocf GR16:$src, -1), (INC16r GR16:$src)>;
  def : Pat<(X86sub_flag_nocf GR32:$src, -1), (INC32r GR32:$src)>;
  def : Pat<(X86sub_flag_nocf GR64:$src, -1), (INC64r GR64:$src)>;
}

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;

// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
  def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
}
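// A worked example: with %ax = 0xAABB, rolw $8, %ax yields 0xBBAA. For a
// two-byte value, rotating by 8 and swapping the bytes are the same
// operation, so the ROL is a correct (and compact) bswap16.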