//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((uint32_t)N->getZExtValue(), SDLoc(N));
}]>;


//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
    SchedRW = [WriteJump] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;


// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;

let SchedRW = [WriteSystem] in {

// x86-64 va_start lowering magic.
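// A hedged sketch of the expansion (emitted by a custom inserter; the
// register save area offsets below are illustrative, not the exact frame
// layout):
//
//   testb %al, %al          # AL = number of vector arguments
//   je    .Ldone            # skip the spills when no XMM args were passed
//   movaps %xmm0, 48(%rsp)
//   movaps %xmm1, 64(%rsp)
//   ...
// .Ldone: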
let hasSideEffects = 1, mayStore = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al, i8mem:$regsavefi, variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi",
                              [(X86vastart_save_xmm_regs GR8:$al, addr:$regsavefi),
                               (implicit EFLAGS)]>;
}

let usesCustomInserter = 1, Defs = [EFLAGS] in {
// The VAARG_64 and VAARG_X32 pseudo-instructions take the address of the
// va_list, and place the address of the next argument into a register.
let Defs = [EFLAGS] in {
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, timm:$size, timm:$mode, timm:$align)),
                  (implicit EFLAGS)]>, Requires<[In64BitMode, IsLP64]>;
def VAARG_X32 : I<0, Pseudo,
                  (outs GR32:$dst),
                  (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                  "#VAARG_X32 $dst, $ap, $size, $mode, $align",
                  [(set GR32:$dst,
                     (X86vaargx32 addr:$ap, timm:$size, timm:$mode, timm:$align)),
                   (implicit EFLAGS)]>, Requires<[In64BitMode, NotLP64]>;
}

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

// To protect against stack clash, dynamic allocation should perform a memory
// probe at each page.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def PROBED_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                         "# variable sized alloca with probing",
                         [(set GR32:$dst,
                            (X86ProbedAlloca GR32:$size))]>,
                       Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def PROBED_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                         "# variable sized alloca with probing",
                         [(set GR64:$dst,
                            (X86ProbedAlloca GR64:$size))]>,
                       Requires<[In64BitMode]>;
}

let hasNoSchedulingInfo = 1 in
def STACKALLOC_W_PROBING : I<0, Pseudo, (outs), (ins i64imm:$stacksize),
                             "# fixed size alloca with probing",
                             []>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets. These calls are needed to probe the stack when allocating more than
// 4k bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in the correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to ordinary calls) like the stack pointer change.
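// Roughly (hedged; the exact callee and convention vary by Windows target and
// word size), a large allocation such as
//
//   void f() { char buf[8192]; use(buf); }
//
// probes through the runtime helper before the stack adjustment:
//
//   movl  $8192, %eax
//   callq __chkstk
//   subq  %rax, %rsp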

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR32:$size)]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR64:$size)]>,
                    Requires<[In64BitMode]>;
} // SchedRW

// These instructions XOR the frame pointer into a GPR. They are used in some
// stack protection schemes. These are post-RA pseudos because we only know the
// frame register after register allocation.
let Constraints = "$src = $dst", isMoveImm = 1, isPseudo = 1, Defs = [EFLAGS] in {
  def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
                   "xorl\t$$FP, $src", []>,
                   Requires<[NotLP64]>, Sched<[WriteALU]>;
  def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
                   "xorq\t$$FP, $src", []>,
                   Requires<[In64BitMode]>, Sched<[WriteALU]>;
}

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>, Sched<[WriteJumpLd]>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)]>, Sched<[WriteJumpLd]>;

}

let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1 in {
  def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>;

  // CATCHRET needs a custom inserter for SEH.
  let usesCustomInserter = 1 in
    def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
                     "# CATCHRET",
                     [(catchret bb:$dst, bb:$from)]>;
}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
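// These pseudos produce no machine code; MC lowering turns them into the
// corresponding .seh_* assembler directives. A hedged sketch of a Win64
// prologue and the directives the pseudos yield:
//
//   pushq %rbp            SEH_PushReg     ->  .seh_pushreg %rbp
//   subq  $32, %rsp       SEH_StackAlloc  ->  .seh_stackalloc 32
//   leaq  32(%rsp), %rbp  SEH_SetFrame    ->  .seh_setframe %rbp, 32
//                         SEH_EndPrologue ->  .seh_endprologue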
let isPseudo = 1, SchedRW = [WriteSystem] in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                      "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                         "#SEH_StackAlloc $size", []>;
  def SEH_StackAlign : I<0, Pseudo, (outs), (ins i32imm:$align),
                         "#SEH_StackAlign $align", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                       "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                        "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                          "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                       "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower. We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1, SchedRW = [WriteJumpLd] in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>;

// This instruction is lowered to a RET followed by a MOV. The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, isMoveImm = 1, AddedComplexity = 10 in
def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                [(set GR32:$dst, 0)]>, Sched<[WriteZero]>;

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
let AddedComplexity = 10 in {
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
}

let Predicates = [OptForSize, Not64BitMode],
    AddedComplexity = 10 in {
  let SchedRW = [WriteALU] in {
  // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
  // which only require 3 bytes compared to MOV32ri which requires 5.
  let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
    def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                    [(set GR32:$dst, 1)]>;
    def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                     [(set GR32:$dst, -1)]>;
  }
  } // SchedRW

  // MOV16ri is 4 bytes, so the instructions above are smaller.
  def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
  def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}

let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
    SchedRW = [WriteALU] in {
// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
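// For illustration: at minsize these are later expanded to a push/pop pair,
// e.g. for the 64-bit form
//   movq $-7, %rax        # 7 bytes: 48 c7 c0 f9 ff ff ff
// becomes
//   pushq $-7             # 2 bytes: 6a f9
//   popq  %rax            # 1 byte:  58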
def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
                       [(set GR32:$dst, i32immSExt8:$src)]>,
                       Requires<[OptForMinSize, NotWin64WithoutFP]>;
def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
                       [(set GR64:$dst, i64immSExt8:$src)]>,
                       Requires<[OptForMinSize, NotWin64WithoutFP]>;
}

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, SchedRW = [WriteMove] in
def MOV32ri64 : I<0, Pseudo, (outs GR64:$dst), (ins i64i32imm:$src), "",
                  [(set GR64:$dst, i64immZExt32:$src)]>;

// This 64-bit pseudo-move can also be used for labels in the x86-64 small code
// model.
def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [X86Wrapper]>;
def : Pat<(i64 mov64imm32:$src), (MOV32ri64 mov64imm32:$src)>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteADC],
    hasSideEffects = 0 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "", []>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "", []>;
} // Uses, Defs, isPseudo, SchedRW, hasSideEffects

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins),
                     "{rep;movsb (%esi), %es:(%edi)|rep movsb es:[edi], [esi]}",
                     [(X86rep_movs i8)]>, REP, AdSize32,
                   Requires<[NotLP64]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsw (%esi), %es:(%edi)|rep movsw es:[edi], [esi]}",
                     [(X86rep_movs i16)]>, REP, AdSize32, OpSize16,
                   Requires<[NotLP64]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsl (%esi), %es:(%edi)|rep movsd es:[edi], [esi]}",
                     [(X86rep_movs i32)]>, REP, AdSize32, OpSize32,
                   Requires<[NotLP64]>;
def REP_MOVSQ_32 : RI<0xA5, RawFrm, (outs), (ins),
                      "{rep;movsq (%esi), %es:(%edi)|rep movsq es:[edi], [esi]}",
                      [(X86rep_movs i64)]>, REP, AdSize32,
                    Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins),
                     "{rep;movsb (%rsi), %es:(%rdi)|rep movsb es:[rdi], [rsi]}",
                     [(X86rep_movs i8)]>, REP, AdSize64,
                   Requires<[IsLP64]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsw (%rsi), %es:(%rdi)|rep movsw es:[rdi], [rsi]}",
                     [(X86rep_movs i16)]>, REP, AdSize64, OpSize16,
                   Requires<[IsLP64]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsl (%rsi), %es:(%rdi)|rep movsd es:[rdi], [rsi]}",
                     [(X86rep_movs i32)]>, REP, AdSize64, OpSize32,
                   Requires<[IsLP64]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins),
                      "{rep;movsq (%rsi), %es:(%rdi)|rep movsq es:[rdi], [rsi]}",
                      [(X86rep_movs i64)]>, REP, AdSize64,
                    Requires<[IsLP64]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
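// Illustrative only (value/count registers are fixed by the Uses lists
// below): a memset such as
//   memset(p, 0, n);
// can select X86rep_stos and be emitted roughly as
//   xorl %eax, %eax       # store value in AL
//   movq %rdx, %rcx       # count in RCX
//   rep stosb %al, %es:(%rdi)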
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins),
                       "{rep;stosb %al, %es:(%edi)|rep stosb es:[edi], al}",
                       [(X86rep_stos i8)]>, REP, AdSize32,
                     Requires<[NotLP64]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosw %ax, %es:(%edi)|rep stosw es:[edi], ax}",
                       [(X86rep_stos i16)]>, REP, AdSize32, OpSize16,
                     Requires<[NotLP64]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosl %eax, %es:(%edi)|rep stosd es:[edi], eax}",
                       [(X86rep_stos i32)]>, REP, AdSize32, OpSize32,
                     Requires<[NotLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_32 : RI<0xAB, RawFrm, (outs), (ins),
                        "{rep;stosq %rax, %es:(%edi)|rep stosq es:[edi], rax}",
                        [(X86rep_stos i64)]>, REP, AdSize32,
                      Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins),
                       "{rep;stosb %al, %es:(%rdi)|rep stosb es:[rdi], al}",
                       [(X86rep_stos i8)]>, REP, AdSize64,
                     Requires<[IsLP64]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosw %ax, %es:(%rdi)|rep stosw es:[rdi], ax}",
                       [(X86rep_stos i16)]>, REP, AdSize64, OpSize16,
                     Requires<[IsLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosl %eax, %es:(%rdi)|rep stosd es:[rdi], eax}",
                       [(X86rep_stos i32)]>, REP, AdSize64, OpSize32,
                     Requires<[IsLP64]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins),
                        "{rep;stosq %rax, %es:(%rdi)|rep stosq es:[rdi], rax}",
                        [(X86rep_stos i64)]>, REP, AdSize64,
                      Requires<[IsLP64]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
let SchedRW = [WriteSystem] in {

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [ESP, SSP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addr32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                      Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
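// For reference (hedged; relocations depend on the ELF TLS model), TLS_addr64
// is emitted as the canonical global-dynamic sequence
//   data16 leaq sym@TLSGD(%rip), %rdi
//   data16 data16 rex64 callq __tls_get_addr@PLT
// which is a real call, hence the long clobber list below.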
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [RSP, SSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode, IsLP64]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                        "# TLS_base_addr64",
                        [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                      Requires<[In64BitMode, IsLP64]>;
def TLS_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                    "# TLS_addrX32",
                    [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[In64BitMode, NotLP64]>;
def TLS_base_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addrX32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                      Requires<[In64BitMode, NotLP64]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax. %ecx is trashed during the function
// call. All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS, DF],
    Uses = [ESP, SSP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, but the
// pseudo directly uses the symbol, so do not add an implicit use of
// %rdi. The lowering will do the right thing with RDI.
// On return the address of the variable is in %rax. All other
// registers are preserved.
let Defs = [RAX, EFLAGS, DF],
    Uses = [RSP, SSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation. Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond,
                                                EFLAGS)))]>;
}

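// A hedged sketch of what the custom inserter (EmitLoweredSelect in
// X86ISelLowering.cpp) builds for one of these pseudos:
//
//   thisMBB:
//     ...                  # EFLAGS already set by earlier code
//     jCC sinkMBB          # take $t when the condition holds
//   copy0MBB:              # fallthrough: the "false" path
//   sinkMBB:
//     %dst = PHI [%f, copy0MBB], [%t, thisMBB]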
let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a customInserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
  // however that requires promoting the operands, and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMov] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMov]

  // fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
  // SSE1/SSE2.
  let Predicates = [FPStackf32] in
    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

  let Predicates = [FPStackf64] in
    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

  let Predicates = [HasMMX] in
    defm _VR64 : CMOVrr_PSEUDO<VR64, x86mmx>;

  let Predicates = [HasSSE1,NoAVX512] in
    defm _FR32 : CMOVrr_PSEUDO<FR32, f32>;
  let Predicates = [HasSSE2,NoAVX512] in
    defm _FR64 : CMOVrr_PSEUDO<FR64, f64>;
  let Predicates = [HasAVX512] in {
    defm _FR32X : CMOVrr_PSEUDO<FR32X, f32>;
    defm _FR64X : CMOVrr_PSEUDO<FR64X, f64>;
  }
  let Predicates = [NoVLX] in {
    defm _VR128 : CMOVrr_PSEUDO<VR128, v2i64>;
    defm _VR256 : CMOVrr_PSEUDO<VR256, v4i64>;
  }
  let Predicates = [HasVLX] in {
    defm _VR128X : CMOVrr_PSEUDO<VR128X, v2i64>;
    defm _VR256X : CMOVrr_PSEUDO<VR256X, v4i64>;
  }
  defm _VR512  : CMOVrr_PSEUDO<VR512, v8i64>;
  defm _VK1    : CMOVrr_PSEUDO<VK1,  v1i1>;
  defm _VK2    : CMOVrr_PSEUDO<VK2,  v2i1>;
  defm _VK4    : CMOVrr_PSEUDO<VK4,  v4i1>;
  defm _VK8    : CMOVrr_PSEUDO<VK8,  v8i1>;
  defm _VK16   : CMOVrr_PSEUDO<VK16, v16i1>;
  defm _VK32   : CMOVrr_PSEUDO<VK32, v32i1>;
  defm _VK64   : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]

def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
          (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

let Predicates = [NoVLX] in {
  def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
}
let Predicates = [HasVLX] in {
  def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
}

def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mi8Locked : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero),
                        "or{l}\t{$zero, $dst|$dst, $zero}", []>,
                        Requires<[Not64BitMode]>, OpSize32, LOCK,
                        Sched<[WriteALURMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, SDNode Op, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALURMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [(set EFLAGS, (Op addr:$dst, GR8:$src2))]>, LOCK;

def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR16:$src2))]>,
                  OpSize16, LOCK;

def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR32:$src2))]>,
                  OpSize32, LOCK;

def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, GR64:$src2))]>, LOCK;

// NOTE: These are order specific, we want the mi8 forms to be listed
// first so that they are slightly preferred to the mi forms.
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))]>,
                     OpSize16, LOCK;

def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))]>,
                     OpSize32, LOCK;

def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))]>,
                      LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))]>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))]>,
                     OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))]>,
                     OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))]>,
                         LOCK;
}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;

def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_add node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_sub node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;
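// Illustrative: "lock incl (%rdi)" is a byte shorter than
// "lock addl $1, (%rdi)" but leaves CF untouched, so the *_nocf fragments
// above only match when CF is unused, e.g.
//   __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST);   // result and flags unused
// can select the LOCK_INC32m form defined below.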
let Predicates = [UseIncDec] in {
  let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                        "inc{b}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                        "inc{w}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                        "inc{l}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                         "inc{q}\t$dst",
                         [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>,
                         LOCK;

    def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                        "dec{b}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                        "dec{w}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                        "dec{l}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                         "dec{q}\t$dst",
                         [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>,
                         LOCK;
  }

  // Additional patterns for -1 constant.
  def : Pat<(X86lock_add addr:$dst, (i8 -1)),  (LOCK_DEC8m  addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i8 -1)),  (LOCK_INC8m  addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>;
}

// Atomic compare and swap.
multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag> {
let isCodeGenOnly = 1, SchedRW = [WriteCMPXCHGRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)]>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)]>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    Predicates = [HasCmpxchg8b], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, usesCustomInserter = 1 in {
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)]>, TB, LOCK;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
                     "cmpxchg16b\t$ptr",
                     []>, TB, LOCK;
}
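// For reference, a hedged sketch of a selected 64-bit compare-and-swap
// (operand registers illustrative; RAX is pinned by the Defs/Uses above):
//   %old = cmpxchg i64* %p, i64 %exp, i64 %new seq_cst seq_cst
// becomes
//   movq %rsi, %rax            # expected value must be in RAX
//   lock cmpxchgq %rdx, (%rdi) # RAX receives the old value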
// This pseudo must be used when the frame uses RBX as
// the base pointer. Indeed, in such a situation RBX is a reserved
// register and the register allocator will ignore any use/def of
// it. In other words, the register allocator will not fix the clobbering
// of RBX that happens when setting the arguments for the instruction.
//
// Unlike the actual related instruction, we mark that this one
// defines RBX (instead of using RBX).
// The rationale is that we will define RBX during the expansion of
// the pseudo. The argument feeding RBX is rbx_input.
//
// The additional argument, $rbx_save, is a temporary register used to
// save the value of RBX across the actual instruction.
//
// To make sure the register assigned to $rbx_save does not interfere with
// the definition of the actual instruction, we use a definition $dst which
// is tied to $rbx_save. That way, the live-range of $rbx_save spans across
// the instruction and we are sure we will have a valid register to restore
// the value of RBX.
let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1,
    mayLoad = 1, mayStore = 1, hasSideEffects = 0,
    Constraints = "$rbx_save = $dst" in {
def LCMPXCHG16B_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save), "", []>;
}

// Pseudo instruction that doesn't read/write RBX. Will be turned into either
// LCMPXCHG16B_SAVE_RBX or LCMPXCHG16B via a custom inserter.
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1,
    mayLoad = 1, mayStore = 1, hasSideEffects = 0,
    usesCustomInserter = 1 in {
def LCMPXCHG16B_NO_RBX :
    I<0, Pseudo, (outs), (ins i128mem:$ptr, GR64:$rbx_input), "",
      [(X86cas16 addr:$ptr, GR64:$rbx_input)]>;
}

// This pseudo must be used when the frame uses RBX/EBX as
// the base pointer.
// cf comment for LCMPXCHG16B_SAVE_RBX.
let Defs = [EBX], Uses = [ECX, EAX],
    Predicates = [HasMWAITX], SchedRW = [WriteSystem],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst" in {
def MWAITX_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins GR32:$ebx_input, GR64:$rbx_save),
      "mwaitx",
      []>;
}

// Pseudo mwaitx instruction to use for custom insertion.
let Predicates = [HasMWAITX], SchedRW = [WriteSystem],
    isCodeGenOnly = 1, isPseudo = 1,
    usesCustomInserter = 1 in {
def MWAITX :
    I<0, Pseudo, (outs), (ins GR32:$ecx, GR32:$eax, GR32:$ebx),
      "mwaitx",
      [(int_x86_mwaitx GR32:$ecx, GR32:$eax, GR32:$ebx)]>;
}


defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", X86cas>;

// Atomic exchange and add
multiclass ATOMIC_RMW_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                            string frag> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], mayLoad = 1, mayStore = 1,
      isCodeGenOnly = 1, SchedRW = [WriteALURMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR16:$dst,
                       (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
                    OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR32:$dst,
                       (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
                    OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set
                        GR64:$dst,
                        (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
  }
}

defm LXADD : ATOMIC_RMW_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;

/* The following multiclass tries to make sure that in code like
 *    x.store (immediate op x.load(acquire), release)
 * and
 *    x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late to prevent them from being accidentally reordered in the backend
 * (see the plain atomic MOV load/store patterns further below).
 */
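// Illustrative: with the patterns below,
//   x.store(x.load(std::memory_order_acquire) + 3, std::memory_order_release);
// selects the memory-operand form directly,
//   addl $3, (%rdi)
// instead of a separate load, register add, and store.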
multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 imm:$src))),
            (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 imm:$src))),
            (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 imm:$src))),
            (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64immSExt32:$src))),
            (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;

  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 GR8:$src))),
            (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 GR16:$src))),
            (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 GR32:$src))),
            (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64 GR64:$src))),
            (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
}
defm : RELEASE_BINOP_MI<"ADD", add>;
defm : RELEASE_BINOP_MI<"AND", and>;
defm : RELEASE_BINOP_MI<"OR",  or>;
defm : RELEASE_BINOP_MI<"XOR", xor>;
defm : RELEASE_BINOP_MI<"SUB", sub>;

// Atomic load + floating point patterns.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>(Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseSSE1]>;
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR32X:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSZrm") FR32X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;

  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>(Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseSSE2]>;
  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR64X:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDZrm") FR64X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;
}
defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
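// Illustrative: (fadd FR32:$x, (bitconvert (atomic_load_32 addr))) folds the
// atomic load into the scalar FP op, e.g.
//   addss (%rdi), %xmm0
// A plain load suffices here: naturally aligned x86 loads up to 8 bytes are
// atomic.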

multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
                        dag dag64> {
  def : Pat<(atomic_store_8 addr:$dst, dag8),
            (!cast<Instruction>(Name#8m) addr:$dst)>;
  def : Pat<(atomic_store_16 addr:$dst, dag16),
            (!cast<Instruction>(Name#16m) addr:$dst)>;
  def : Pat<(atomic_store_32 addr:$dst, dag32),
            (!cast<Instruction>(Name#32m) addr:$dst)>;
  def : Pat<(atomic_store_64 addr:$dst, dag64),
            (!cast<Instruction>(Name#64m) addr:$dst)>;
}

let Predicates = [UseIncDec] in {
  defm : RELEASE_UNOP<"INC",
                      (add (atomic_load_8 addr:$dst), (i8 1)),
                      (add (atomic_load_16 addr:$dst), (i16 1)),
                      (add (atomic_load_32 addr:$dst), (i32 1)),
                      (add (atomic_load_64 addr:$dst), (i64 1))>;
  defm : RELEASE_UNOP<"DEC",
                      (add (atomic_load_8 addr:$dst), (i8 -1)),
                      (add (atomic_load_16 addr:$dst), (i16 -1)),
                      (add (atomic_load_32 addr:$dst), (i32 -1)),
                      (add (atomic_load_64 addr:$dst), (i64 -1))>;
}

defm : RELEASE_UNOP<"NEG",
                    (ineg (i8 (atomic_load_8 addr:$dst))),
                    (ineg (i16 (atomic_load_16 addr:$dst))),
                    (ineg (i32 (atomic_load_32 addr:$dst))),
                    (ineg (i64 (atomic_load_64 addr:$dst)))>;
defm : RELEASE_UNOP<"NOT",
                    (not (i8 (atomic_load_8 addr:$dst))),
                    (not (i16 (atomic_load_16 addr:$dst))),
                    (not (i32 (atomic_load_32 addr:$dst))),
                    (not (i64 (atomic_load_64 addr:$dst)))>;

def : Pat<(atomic_store_8 addr:$dst, (i8 imm:$src)),
          (MOV8mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_16 addr:$dst, (i16 imm:$src)),
          (MOV16mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_32 addr:$dst, (i32 imm:$src)),
          (MOV32mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_64 addr:$dst, (i64immSExt32:$src)),
          (MOV64mi32 addr:$dst, i64immSExt32:$src)>;

def : Pat<(atomic_store_8 addr:$dst, GR8:$src),
          (MOV8mr addr:$dst, GR8:$src)>;
def : Pat<(atomic_store_16 addr:$dst, GR16:$src),
          (MOV16mr addr:$dst, GR16:$src)>;
def : Pat<(atomic_store_32 addr:$dst, GR32:$src),
          (MOV32mr addr:$dst, GR32:$src)>;
def : Pat<(atomic_store_64 addr:$dst, GR64:$src),
          (MOV64mr addr:$dst, GR64:$src)>;

def : Pat<(i8 (atomic_load_8 addr:$src)),  (MOV8rm addr:$src)>;
def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>;
def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>;
def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>;

// Floating point loads/stores.
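// Illustrative: with the patterns below, a release store of an f32 value
// stays in the FP domain,
//   movss %xmm0, (%rdi)
// rather than bouncing through a GPR for the bitconvert.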
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSmr addr:$dst, FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSZmr addr:$dst, FR32:$src)>, Requires<[HasAVX512]>;

def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (MOVSDmr addr:$dst, FR64:$src)>, Requires<[UseSSE2]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDZmr addr:$dst, FR64:$src)>, Requires<[HasAVX512]>;

def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (MOVSSrm_alt addr:$src)>, Requires<[UseSSE1]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSZrm_alt addr:$src)>, Requires<[HasAVX512]>;

def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (MOVSDrm_alt addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDZrm_alt addr:$src)>, Requires<[HasAVX512]>;

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
// binary size compared to a regular MOV, but it introduces an unnecessary
// load, so is not suitable for regular or optsize functions.
let Predicates = [OptForMinSize] in {
def : Pat<(simple_store (i16 0), addr:$dst),  (AND16mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i32 0), addr:$dst),  (AND32mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i64 0), addr:$dst),  (AND64mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
}

// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global
// addresses directly as immediates. FIXME: This is really a hack, the 'imm'
// predicate for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsNotPIC]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// Calls

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86call_rvmarker (timm:$sel), (i64 texternalsym:$dst)),
          (CALL64pcrel32_RVMARKER timm:$sel, texternalsym:$dst)>;
def : Pat<(X86call_rvmarker (timm:$sel), (i64 tglobaladdr:$dst)),
          (CALL64pcrel32_RVMARKER timm:$sel, tglobaladdr:$dst)>;


// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;
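// Illustrative: a sibling call in the small code model,
//   return f(x);
// selects X86tcret and, after the epilogue has run, is emitted as
//   jmp f                 # TCRETURNdi64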
def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), timm:$off),
          (TCRETURNmi addr:$dst, timm:$off)>,
          Requires<[Not64BitMode, IsNotPIC, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), timm:$off),
          (TCRETURNdi tglobaladdr:$dst, timm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), timm:$off),
          (TCRETURNdi texternalsym:$dst, timm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), timm:$off),
          (TCRETURNmi64 addr:$dst, timm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (INDIRECT_THUNK_TCRETURN64 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[In64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (INDIRECT_THUNK_TCRETURN32 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[Not64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), timm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, timm:$off)>,
          Requires<[IsLP64]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), timm:$off),
          (TCRETURNdi64 texternalsym:$dst, timm:$off)>,
          Requires<[IsLP64]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;

// zextload bool -> zextload byte
// An i1 is stored in one byte in zero-extended form; the upper bits are
// cleaned up before the store, so a plain byte load suffices.
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.
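// e.g. (i64 (extload (i8 addr))) is selected as
//   movzbl (%rdi), %eax   # MOVZX32rm8; bits 63:32 are zeroed implicitly
// with the i64 value formed by SUBREG_TO_REG in the patterns below.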

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
// NOTE: The extloadi64i32 pattern needs to be first as it will try to form
// 32-bit loads for 4 byte aligned i8/i16 loads.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expect i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;

// If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
// instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
// %ah to the lower byte of a register. By using a MOVSX here we allow a
// post-isel peephole to merge the two MOVSX instructions into one.
def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs),[{
  return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
          N->getOperand(0).getResNo() == 1);
}]>;
def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;

// Any instruction that defines a 32-bit result zeroes the high half of the
// 64-bit register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. AssertSext/AssertZext/AssertAlign aren't saying
// anything about the upper 32 bits, they're probably just qualifying a
// CopyFromReg. FREEZE may be coming from a truncate. Any other 32-bit
// operation will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != ISD::AssertZext &&
         N->getOpcode() != ISD::AssertAlign &&
         N->getOpcode() != ISD::FREEZE;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
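// e.g. (zext (i32 (add GR32:$a, GR32:$b))) needs no explicit extension:
//   addl %esi, %edi       # a 32-bit def already zeroes bits 63:32
// so the patterns below just retag the register with SUBREG_TO_REG.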
// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to finally emit these instructions as an OR at the end of the code
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
  return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;

// (or x1, x2) -> (add x1, x2) if the two operands are known not to share bits.
// Try this before selecting to OR.
let SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1, isPseudo = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD8rr_DB  : I<0, Pseudo, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
                   "", // orb/addb REG, REG
                   [(set GR8:$dst, (or_is_add GR8:$src1, GR8:$src2))]>;
def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "", // orw/addw REG, REG
                   [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "", // orl/addl REG, REG
                   [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "", // orq/addq REG, REG
                   [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable
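// For example, if bits 1:0 of %x are known to be zero, (or %x, 3) selects
// to ADD32ri8_DB, which can later be 3-addressified into an LEA
// (illustrative):
//   leal 3(%rdi), %eax     # no copy needed, unlike "movl %edi, %eax; orl $3, %eax"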
// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD8ri_DB   : I<0, Pseudo,
                    (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
                    "", // orb/addb REG, imm8
                    [(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress
} // SchedRW

//===----------------------------------------------------------------------===//
// Pattern match SUB as XOR
//===----------------------------------------------------------------------===//

// An immediate in the LHS of a subtract can't be encoded in the instruction.
// If there is no possibility of a borrow, we can use an XOR instead of a SUB
// to enable the immediate to be folded.
// TODO: Move this to a DAG combine?

def sub_is_xor : PatFrag<(ops node:$lhs, node:$rhs), (sub node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
    KnownBits Known = CurDAG->computeKnownBits(N->getOperand(1));

    // If all possible ones in the RHS are set in the LHS then there can't be
    // a borrow and we can use xor.
    return (~Known.Zero).isSubsetOf(CN->getAPIntValue());
  }

  return false;
}]>;

let AddedComplexity = 5 in {
def : Pat<(sub_is_xor imm:$src2, GR8:$src1),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i16immSExt8:$src2, GR16:$src1),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR16:$src1),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i32immSExt8:$src2, GR32:$src1),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR32:$src1),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i64immSExt8:$src2, GR64:$src1),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub_is_xor i64immSExt32:$src2, GR64:$src1),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
} // AddedComplexity = 5
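// For example, if %x is known to lie in [0, 7], (sub 7, %x) cannot borrow,
// so it is selected as an xor with a foldable immediate (illustrative):
//   xorl $7, %edi          # computes 7 - %edi when bits 31:3 are zero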
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;

def : Pat<(X86add_flag_nocf GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit
// AND with implicit zero-extension instead of a 64-bit AND if the immediate
// has at least 32 bits of leading zeros. If in addition the low 32 bits can
// be represented with a sign extension of an 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1

// AddedComplexity is needed due to the increased complexity of the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in the DAGISel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
                          sub_16bit)>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
} // AddedComplexity = 1
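// For example, (and GR32:$x, 0xffff) is selected as a zero-extending move
// rather than an AND with a 4-byte immediate (illustrative):
//   movzwl %di, %edi       # 3 bytes, vs. 6 bytes for andl $65535, %edi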
// Try to use BTS/BTR/BTC for single-bit operations on the upper 32 bits.

def BTRXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the index of the lowest clear bit.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingOnes(), SDLoc(N));
}]>;

def BTCBTSXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the index of the lowest set bit.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingZeros(), SDLoc(N));
}]>;

def BTRMask64 : ImmLeaf<i64, [{
  return !isUInt<32>(Imm) && !isInt<32>(Imm) && isPowerOf2_64(~Imm);
}]>;

def BTCBTSMask64 : ImmLeaf<i64, [{
  return !isInt<32>(Imm) && isPowerOf2_64(Imm);
}]>;

// For now, only do this when optimizing for size.
let AddedComplexity = 1, Predicates=[OptForSize] in {
  def : Pat<(and GR64:$src1, BTRMask64:$mask),
            (BTR64ri8 GR64:$src1, (BTRXForm imm:$mask))>;
  def : Pat<(or GR64:$src1, BTCBTSMask64:$mask),
            (BTS64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
  def : Pat<(xor GR64:$src1, BTCBTSMask64:$mask),
            (BTC64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
}

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
                          sub_16bit)>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;

// sext, sext_load, zext, zext_load
def : Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def : Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
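// For example, in 32-bit mode (i8 (trunc GR32:$src)) goes through
// GR32_ABCD because only %eax/%ebx/%ecx/%edx have an addressable low byte
// there; in 64-bit mode every GR32 register has one, so a plain
// EXTRACT_SUBREG suffices.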
def immff00_ffff : ImmLeaf<i32, [{
  return Imm >= 0xff00 && Imm <= 0xffff;
}]>;

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_16bit)>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
def : Pat<(srl (and_su GR32:$src, immff00_ffff), (i8 8)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if
// the value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX
// prefix from being allocated in the same instruction as the h register, as
// there's currently no way to describe this requirement to the register
// allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// Special pattern to catch the last step of __builtin_parity handling. Our
// goal is to use an xor of an h-register with the corresponding l-register.
// The above patterns would handle this on non-64-bit targets, but for 64-bit
// we need to be more careful. We're using a NOREX instruction here in case
// register allocation fails to keep the two registers together. So we need to
// make sure we can't accidentally mix R8-R15 with an h-register.
def : Pat<(X86xor_flag (i8 (trunc GR32:$src)),
                       (i8 (trunc (srl_su GR32:$src, (i8 8))))),
          (XOR8rr_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit),
                        (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
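// For example, for a value living in %eax the pattern above selects to
// (illustrative):
//   xorb %ah, %al          # the NOREX form keeps %ah away from R8-R15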
// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 3);
}]>;

def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 4);
}]>;

def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 5);
}]>;

def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 6);
}]>;

// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
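// For example, (shl GR32:$x, (and CL, 31)) drops the redundant mask and
// selects directly to (illustrative):
//   shll %cl, %edi         # the hardware masks the count to 5 bits anyway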
// ROL/ROR instructions allow a stronger mask optimization than shift for
// 8- and 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation
// amount because over-rotating produces the same result. This is noted in the
// Intel docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the
// rotation amount could affect EFLAGS results, but that does not matter
// because we are not tracking flags for these nodes.
multiclass MaskedRotateAmountPats<SDNode frag, string name> {
  // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
  def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask8 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask16 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (rot x (and y, 63)) ==> (rot x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedRotateAmountPats<rotl, "ROL">;
defm : MaskedRotateAmountPats<rotr, "ROR">;

// Double "funnel" shift amount is implicitly masked.
// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y) (NOTE: modulo32)
def : Pat<(X86fshl GR16:$src1, GR16:$src2, (shiftMask32 CL)),
          (SHLD16rrCL GR16:$src1, GR16:$src2)>;
def : Pat<(X86fshr GR16:$src2, GR16:$src1, (shiftMask32 CL)),
          (SHRD16rrCL GR16:$src1, GR16:$src2)>;

// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR32:$src1, GR32:$src2, (shiftMask32 CL)),
          (SHLD32rrCL GR32:$src1, GR32:$src2)>;
def : Pat<(fshr GR32:$src2, GR32:$src1, (shiftMask32 CL)),
          (SHRD32rrCL GR32:$src1, GR32:$src2)>;

// (fshl/fshr x (and y, 63)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR64:$src1, GR64:$src2, (shiftMask64 CL)),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;
def : Pat<(fshr GR64:$src2, GR64:$src1, (shiftMask64 CL)),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

let Predicates = [HasBMI2] in {
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, (shiftMask32 GR8:$src2)),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, (shiftMask64 GR8:$src2)),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  } // AddedComplexity = 1

  def : Pat<(sra (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SARX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(sra (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SARX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
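  // For example, with BMI2 (srl GR32:$x, (and GR8:$n, 31)) can take its
  // count from any register and leaves EFLAGS intact (illustrative):
  //   shrxl %ecx, %edi, %eax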
  def : Pat<(srl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHRX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(srl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHRX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(shl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHLX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(shl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHLX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
} // Predicates = [HasBMI2]

// Use BTR/BTS/BTC for clearing/setting/toggling a bit in a variable location.
multiclass one_bit_patterns<RegisterClass RC, ValueType VT, Instruction BTR,
                            Instruction BTS, Instruction BTC,
                            PatFrag ShiftMask> {
  def : Pat<(and RC:$src1, (rotl -2, GR8:$src2)),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, GR8:$src2)),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, GR8:$src2)),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  // Similar to above, but removing unneeded masking of the shift amount.
  def : Pat<(and RC:$src1, (rotl -2, (ShiftMask GR8:$src2))),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}

defm : one_bit_patterns<GR16, i16, BTR16rr, BTS16rr, BTC16rr, shiftMask16>;
defm : one_bit_patterns<GR32, i32, BTR32rr, BTS32rr, BTC32rr, shiftMask32>;
defm : one_bit_patterns<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(add GR64:$src1, GR64:$src2), (ADD64rr GR64:$src1, GR64:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(sub GR64:$src1, GR64:$src2), (SUB64rr GR64:$src1, GR64:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment/Decrement reg.
// Do not use INC/DEC if they are slow.
let Predicates = [UseIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

  def : Pat<(X86add_flag_nocf GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(X86add_flag_nocf GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(X86add_flag_nocf GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(X86add_flag_nocf GR64:$src, -1), (DEC64r GR64:$src)>;
  def : Pat<(X86sub_flag_nocf GR8:$src, -1),  (INC8r GR8:$src)>;
  def : Pat<(X86sub_flag_nocf GR16:$src, -1), (INC16r GR16:$src)>;
  def : Pat<(X86sub_flag_nocf GR32:$src, -1), (INC32r GR32:$src)>;
  def : Pat<(X86sub_flag_nocf GR64:$src, -1), (INC64r GR64:$src)>;
}

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;

// When HasMOVBE is enabled, it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
 def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
}
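// For example, (bswap GR16:$x) simply swaps the two bytes of the value
// (illustrative):
//   rolw $8, %di           # rotating a 16-bit value by 8 is a byte swap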