1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #ifndef jit_x64_Assembler_x64_h
8 #define jit_x64_Assembler_x64_h
9
10 #include "mozilla/ArrayUtils.h"
11
12 #include "jit/JitCode.h"
13 #include "jit/JitRealm.h"
14 #include "jit/shared/Assembler-shared.h"
15
16 namespace js {
17 namespace jit {
18
// 64-bit general-purpose registers, named by their X86Encoding numbers.
static constexpr Register rax{X86Encoding::rax};
static constexpr Register rbx{X86Encoding::rbx};
static constexpr Register rcx{X86Encoding::rcx};
static constexpr Register rdx{X86Encoding::rdx};
static constexpr Register rsi{X86Encoding::rsi};
static constexpr Register rdi{X86Encoding::rdi};
static constexpr Register rbp{X86Encoding::rbp};
static constexpr Register r8{X86Encoding::r8};
static constexpr Register r9{X86Encoding::r9};
static constexpr Register r10{X86Encoding::r10};
static constexpr Register r11{X86Encoding::r11};
static constexpr Register r12{X86Encoding::r12};
static constexpr Register r13{X86Encoding::r13};
static constexpr Register r14{X86Encoding::r14};
static constexpr Register r15{X86Encoding::r15};
static constexpr Register rsp{X86Encoding::rsp};

// SSE registers, all declared with Double content type here; the same
// encodings are re-declared with Single/Simd128 types where needed
// (see ReturnFloat32Reg and friends below).
static constexpr FloatRegister xmm0 =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static constexpr FloatRegister xmm1 =
    FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
static constexpr FloatRegister xmm2 =
    FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
static constexpr FloatRegister xmm3 =
    FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
static constexpr FloatRegister xmm4 =
    FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
static constexpr FloatRegister xmm5 =
    FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
static constexpr FloatRegister xmm6 =
    FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
static constexpr FloatRegister xmm7 =
    FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
static constexpr FloatRegister xmm8 =
    FloatRegister(X86Encoding::xmm8, FloatRegisters::Double);
static constexpr FloatRegister xmm9 =
    FloatRegister(X86Encoding::xmm9, FloatRegisters::Double);
static constexpr FloatRegister xmm10 =
    FloatRegister(X86Encoding::xmm10, FloatRegisters::Double);
static constexpr FloatRegister xmm11 =
    FloatRegister(X86Encoding::xmm11, FloatRegisters::Double);
static constexpr FloatRegister xmm12 =
    FloatRegister(X86Encoding::xmm12, FloatRegisters::Double);
static constexpr FloatRegister xmm13 =
    FloatRegister(X86Encoding::xmm13, FloatRegisters::Double);
static constexpr FloatRegister xmm14 =
    FloatRegister(X86Encoding::xmm14, FloatRegisters::Double);
static constexpr FloatRegister xmm15 =
    FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);

// X86-common synonyms. Note these alias the full 64-bit registers (same
// encodings); they exist so x86-shared code can name eax, ebx, etc.
static constexpr Register eax = rax;
static constexpr Register ebx = rbx;
static constexpr Register ecx = rcx;
static constexpr Register edx = rdx;
static constexpr Register esi = rsi;
static constexpr Register edi = rdi;
static constexpr Register ebp = rbp;
static constexpr Register esp = rsp;

// Sentinel "no register" values.
static constexpr Register InvalidReg{X86Encoding::invalid_reg};
static constexpr FloatRegister InvalidFloatReg = FloatRegister();

static constexpr Register StackPointer = rsp;
static constexpr Register FramePointer = rbp;
// Register carrying the boxed Value result of a JS -> JS call.
static constexpr Register JSReturnReg = rcx;
// Avoid, except for assertions.
static constexpr Register JSReturnReg_Type = JSReturnReg;
static constexpr Register JSReturnReg_Data = JSReturnReg;

// The MacroAssembler's scratch register; acquire it via
// ScratchRegisterScope rather than using it directly.
static constexpr Register ScratchReg = r11;
90
// Helper class for ScratchRegister usage. Asserts that only one piece
// of code thinks it has exclusive ownership of the scratch register.
// RAII: ownership is released when the scope object is destroyed.
struct ScratchRegisterScope : public AutoRegisterScope {
  explicit ScratchRegisterScope(MacroAssembler& masm)
      : AutoRegisterScope(masm, ScratchReg) {}
};
97
// Integer return value register for native (C/C++) calls.
static constexpr Register ReturnReg = rax;
static constexpr Register HeapReg = r15;
static constexpr Register64 ReturnReg64(rax);
// xmm0 viewed with each content type, for float/double/SIMD returns.
static constexpr FloatRegister ReturnFloat32Reg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
static constexpr FloatRegister ReturnDoubleReg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static constexpr FloatRegister ReturnSimd128Reg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
// xmm15 is reserved as the float scratch register, in each content type.
static constexpr FloatRegister ScratchFloat32Reg =
    FloatRegister(X86Encoding::xmm15, FloatRegisters::Single);
static constexpr FloatRegister ScratchDoubleReg =
    FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);
static constexpr FloatRegister ScratchSimd128Reg =
    FloatRegister(X86Encoding::xmm15, FloatRegisters::Simd128);

// Avoid rbp, which is the FramePointer, which is unavailable in some modes.
static constexpr Register CallTempReg0 = rax;
static constexpr Register CallTempReg1 = rdi;
static constexpr Register CallTempReg2 = rbx;
static constexpr Register CallTempReg3 = rcx;
static constexpr Register CallTempReg4 = rsi;
static constexpr Register CallTempReg5 = rdx;

// Different argument registers for WIN64 (Microsoft x64 calling convention:
// four shared int/float argument slots) vs. SysV (six int, eight float).
#if defined(_WIN64)
static constexpr Register IntArgReg0 = rcx;
static constexpr Register IntArgReg1 = rdx;
static constexpr Register IntArgReg2 = r8;
static constexpr Register IntArgReg3 = r9;
static constexpr uint32_t NumIntArgRegs = 4;
static constexpr Register IntArgRegs[NumIntArgRegs] = {rcx, rdx, r8, r9};

// Volatile-but-not-argument registers usable as call temporaries.
static constexpr Register CallTempNonArgRegs[] = {rax, rdi, rbx, rsi};
static constexpr uint32_t NumCallTempNonArgRegs =
    mozilla::ArrayLength(CallTempNonArgRegs);

static constexpr FloatRegister FloatArgReg0 = xmm0;
static constexpr FloatRegister FloatArgReg1 = xmm1;
static constexpr FloatRegister FloatArgReg2 = xmm2;
static constexpr FloatRegister FloatArgReg3 = xmm3;
static constexpr uint32_t NumFloatArgRegs = 4;
static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = {xmm0, xmm1,
                                                                xmm2, xmm3};
#else
static constexpr Register IntArgReg0 = rdi;
static constexpr Register IntArgReg1 = rsi;
static constexpr Register IntArgReg2 = rdx;
static constexpr Register IntArgReg3 = rcx;
static constexpr Register IntArgReg4 = r8;
static constexpr Register IntArgReg5 = r9;
static constexpr uint32_t NumIntArgRegs = 6;
static constexpr Register IntArgRegs[NumIntArgRegs] = {rdi, rsi, rdx,
                                                       rcx, r8,  r9};

// Volatile-but-not-argument registers usable as call temporaries.
static constexpr Register CallTempNonArgRegs[] = {rax, rbx};
static constexpr uint32_t NumCallTempNonArgRegs =
    mozilla::ArrayLength(CallTempNonArgRegs);

static constexpr FloatRegister FloatArgReg0 = xmm0;
static constexpr FloatRegister FloatArgReg1 = xmm1;
static constexpr FloatRegister FloatArgReg2 = xmm2;
static constexpr FloatRegister FloatArgReg3 = xmm3;
static constexpr FloatRegister FloatArgReg4 = xmm4;
static constexpr FloatRegister FloatArgReg5 = xmm5;
static constexpr FloatRegister FloatArgReg6 = xmm6;
static constexpr FloatRegister FloatArgReg7 = xmm7;
static constexpr uint32_t NumFloatArgRegs = 8;
static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = {
    xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7};
#endif

// Registers used in RegExpMatcher instruction (do not use JSReturnOperand).
static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
static constexpr Register RegExpMatcherStringReg = CallTempReg1;
static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;

// Registers used in RegExpTester instruction (do not use ReturnReg).
static constexpr Register RegExpTesterRegExpReg = CallTempReg1;
static constexpr Register RegExpTesterStringReg = CallTempReg2;
static constexpr Register RegExpTesterLastIndexReg = CallTempReg3;
179
// Computes the ABI-mandated location (register or stack slot) of each
// successive native-call argument.
// NOTE(review): this class is guarded by XP_WIN while the argument-register
// constants above use _WIN64 -- presumably equivalent for 64-bit Windows
// builds; confirm.
class ABIArgGenerator {
#if defined(XP_WIN)
  // Win64: integer and float arguments consume a single shared index.
  unsigned regIndex_;
#else
  // SysV: integer and float argument registers advance independently.
  unsigned intRegIndex_;
  unsigned floatRegIndex_;
#endif
  // Bytes of stack consumed by arguments that did not fit in registers.
  uint32_t stackOffset_;
  // The location most recently returned by next().
  ABIArg current_;

 public:
  ABIArgGenerator();
  // Returns the location for the next argument of the given type.
  ABIArg next(MIRType argType);
  ABIArg& current() { return current_; }
  uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
};
196
// These registers may be volatile or nonvolatile.
// Avoid r11, which is the MacroAssembler's ScratchReg.
static constexpr Register ABINonArgReg0 = rax;
static constexpr Register ABINonArgReg1 = rbx;
static constexpr Register ABINonArgReg2 = r10;
static constexpr Register ABINonArgReg3 = r12;

// This register may be volatile or nonvolatile. Avoid xmm15 which is the
// ScratchDoubleReg.
static constexpr FloatRegister ABINonArgDoubleReg =
    FloatRegister(X86Encoding::xmm8, FloatRegisters::Double);

// These registers may be volatile or nonvolatile.
// Note: these three registers are all guaranteed to be different
static constexpr Register ABINonArgReturnReg0 = r10;
static constexpr Register ABINonArgReturnReg1 = r12;
static constexpr Register ABINonVolatileReg = r13;

// This register is guaranteed to be clobberable during the prologue and
// epilogue of an ABI call which must preserve both ABI argument, return
// and non-volatile registers.
static constexpr Register ABINonArgReturnVolatileReg = r10;

// TLS pointer argument register for WebAssembly functions. This must not alias
// any other register used for passing function arguments or return values.
// Preserved by WebAssembly functions.
static constexpr Register WasmTlsReg = r14;

// Registers used for asm.js/wasm table calls. These registers must be disjoint
// from the ABI argument registers, WasmTlsReg and each other.
static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;

// Register used as a scratch along the return path in the fast js -> wasm stub
// code. This must not overlap ReturnReg, JSReturnOperand, or WasmTlsReg. It
// must be a volatile register.
static constexpr Register WasmJitEntryReturnScratch = rbx;

static constexpr Register OsrFrameReg = IntArgReg3;

static constexpr Register PreBarrierReg = rdx;

// NOTE(review): aliases WasmTlsReg (both r14); presumably the interpreter
// PC and the wasm TLS pointer are never live simultaneously -- confirm
// before reusing either register elsewhere.
static constexpr Register InterpreterPCReg = r14;

static constexpr uint32_t ABIStackAlignment = 16;
static constexpr uint32_t CodeAlignment = 16;
static constexpr uint32_t JitStackAlignment = 16;

static constexpr uint32_t JitStackValueAlignment =
    JitStackAlignment / sizeof(Value);
static_assert(JitStackAlignment % sizeof(Value) == 0 &&
                  JitStackValueAlignment >= 1,
              "Stack alignment should be a non-zero multiple of sizeof(Value)");

static constexpr uint32_t SimdMemoryAlignment = 16;

static_assert(CodeAlignment % SimdMemoryAlignment == 0,
              "Code alignment should be larger than any of the alignments "
              "which are used for "
              "the constant sections of the code buffer. Thus it should be "
              "larger than the "
              "alignment for SIMD constants.");

static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
              "Stack alignment should be larger than any of the alignments "
              "which are used for "
              "spilled values. Thus it should be larger than the alignment "
              "for SIMD accesses.");

static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
// Byte length of the wasm trap instruction (ud2 is two bytes).
static constexpr uint32_t WasmTrapInstructionLength = 2;

// Pointers are eight bytes on x64, so pointer-sized scaled-index
// addressing uses TimesEight.
static constexpr Scale ScalePointer = TimesEight;
272
273 } // namespace jit
274 } // namespace js
275
276 #include "jit/x86-shared/Assembler-x86-shared.h"
277
278 namespace js {
279 namespace jit {
280
// Return operand from a JS -> JS call. On x64 the boxed Value fits in a
// single register (JSReturnReg).
static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
283
// x64-specific assembler, layered on the x86-shared assembler. Adds the
// 64-bit (…q) instruction forms and the extended jump table machinery
// described below.
class Assembler : public AssemblerX86Shared {
  // x64 jumps may need extra bits of relocation, because a jump may extend
  // beyond the signed 32-bit range. To account for this we add an extended
  // jump table at the bottom of the instruction stream, and if a jump
  // overflows its range, it will redirect here.
  //
  // In our relocation table, we store two offsets instead of one: the offset
  // to the original jump, and an offset to the extended jump if we will need
  // to use it instead. The offsets are stored as:
  //    [unsigned] Unsigned offset to short jump, from the start of the code.
  //    [unsigned] Unsigned offset to the extended jump, from the start of
  //               the jump table, in units of SizeOfJumpTableEntry.
  //
  // The start of the relocation table contains the offset from the code
  // buffer to the start of the extended jump table.
  //
  // Each entry in this table is a jmp [rip], followed by a ud2 to hint to the
  // hardware branch predictor that there is no fallthrough, followed by the
  // eight bytes containing an immediate address. This comes out to 16 bytes.
  //    +1 byte for opcode
  //    +1 byte for mod r/m
  //    +4 bytes for rip-relative offset (2)
  //    +2 bytes for ud2 instruction
  //    +8 bytes for 64-bit address
  //
  static const uint32_t SizeOfExtendedJump = 1 + 1 + 4 + 2 + 8;
  static const uint32_t SizeOfJumpTableEntry = 16;

  // Code-buffer offset of the extended jump table; initialized to 0.
  uint32_t extendedJumpTable_;

  static JitCode* CodeFromJump(JitCode* code, uint8_t* jump);

 private:
  void writeRelocation(JmpSrc src, RelocationKind reloc);
  void addPendingJump(JmpSrc src, ImmPtr target, RelocationKind reloc);

 protected:
  size_t addPatchableJump(JmpSrc src, RelocationKind reloc);

 public:
  // Pull in the x86-shared overloads alongside the x64-specific ones below.
  using AssemblerX86Shared::j;
  using AssemblerX86Shared::jmp;
  using AssemblerX86Shared::pop;
  using AssemblerX86Shared::push;
  using AssemblerX86Shared::vmovq;

  Assembler() : extendedJumpTable_(0) {}

  static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                   CompactBufferReader& reader);

  // The buffer is about to be linked, make sure any constant pools or excess
  // bookkeeping has been flushed to the instruction stream.
  void finish();

  // Copy the assembly code to the given buffer, and perform any pending
  // relocations relying on the target address.
  void executableCopy(uint8_t* buffer);
342
343 // Actual assembly emitting functions.
344
  // Push a GC pointer. push has no 64-bit immediate form, so the pointer is
  // materialized through the scratch register (with a data relocation, via
  // movq(ImmGCPtr, Register)).
  void push(const ImmGCPtr ptr) {
    movq(ptr, ScratchReg);
    push(ScratchReg);
  }
push(const ImmWord ptr)349 void push(const ImmWord ptr) {
350 // We often end up with ImmWords that actually fit into int32.
351 // Be aware of the sign extension behavior.
352 if (ptr.value <= INT32_MAX) {
353 push(Imm32(ptr.value));
354 } else {
355 movq(ptr, ScratchReg);
356 push(ScratchReg);
357 }
358 }
  // Push a raw pointer by reusing the ImmWord path.
  void push(ImmPtr imm) { push(ImmWord(uintptr_t(imm.value))); }
  // Push a double: make room manually and store, since there is no push
  // instruction for xmm registers.
  void push(FloatRegister src) {
    subq(Imm32(sizeof(double)), StackPointer);
    vmovsd(src, Address(StackPointer, 0));
  }
  // Push a 64-bit immediate and return the patchable CodeOffset produced
  // by movWithPatch, so the pushed value can be rewritten later.
  CodeOffset pushWithPatch(ImmWord word) {
    CodeOffset label = movWithPatch(word, ScratchReg);
    push(ScratchReg);
    return label;
  }
369
  // Pop a double from the stack into an xmm register (inverse of
  // push(FloatRegister)).
  void pop(FloatRegister src) {
    vmovsd(Address(StackPointer, 0), src);
    addq(Imm32(sizeof(double)), StackPointer);
  }

  // Load a full 64-bit immediate with movabs so the immediate field can be
  // rewritten later; returns the CodeOffset used for patching.
  CodeOffset movWithPatch(ImmWord word, Register dest) {
    masm.movq_i64r(word.value, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movWithPatch(ImmPtr imm, Register dest) {
    return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
  }

  // This is for patching during code generation, not after.
  // Overwrites the 32-bit immediate of the addq emitted by addqWithPatch.
  void patchAddq(CodeOffset offset, int32_t n) {
    unsigned char* code = masm.data();
    X86Encoding::SetInt32(code + offset.offset(), n);
  }
388
  // Load an ImmWord value into a register. Note that this instruction will
  // attempt to optimize its immediate field size. When a full 64-bit
  // immediate is needed for a relocation, use movWithPatch.
  void movq(ImmWord word, Register dest) {
    // Load a 64-bit immediate into a register. If the value falls into
    // certain ranges, we can use specialized instructions which have
    // smaller encodings.
    if (word.value <= UINT32_MAX) {
      // movl has a 32-bit unsigned (effectively) immediate field.
      masm.movl_i32r((uint32_t)word.value, dest.encoding());
    } else if ((intptr_t)word.value >= INT32_MIN &&
               (intptr_t)word.value <= INT32_MAX) {
      // movq has a 32-bit signed immediate field.
      masm.movq_i32r((int32_t)(intptr_t)word.value, dest.encoding());
    } else {
      // Otherwise use movabs.
      masm.movq_i64r(word.value, dest.encoding());
    }
  }
  void movq(ImmPtr imm, Register dest) {
    movq(ImmWord(uintptr_t(imm.value)), dest);
  }
  // GC pointers always use the full 64-bit (movabs) form, and a data
  // relocation is recorded so the immediate can be traced/updated.
  void movq(ImmGCPtr ptr, Register dest) {
    masm.movq_i64r(uintptr_t(ptr.value), dest.encoding());
    writeDataRelocation(ptr);
  }
  // 64-bit load: dest <- src (register, base+disp, scaled-index, or
  // absolute-address operand).
  void movq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.movq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.movq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.movq_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // 64-bit store: dest operand <- src register.
  void movq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.movq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.movq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.movq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                     dest.scale());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movq_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // Store a 32-bit immediate into a 64-bit destination.
  // NOTE(review): the memory forms use movq_i32m (sign-extending), while the
  // REG form uses movl_i32r (zero-extending into the full register) --
  // presumably callers rely on the REG behavior for non-negative values;
  // confirm before passing negative immediates here.
  void movq(Imm32 imm32, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.movl_i32r(imm32.value, dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.movq_i32m(imm32.value, dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.movq_i32m(imm32.value, dest.disp(), dest.base(), dest.index(),
                       dest.scale());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movq_i32m(imm32.value, dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // Move raw 64-bit patterns between a GPR and an xmm register.
  void vmovq(Register src, FloatRegister dest) {
    masm.vmovq_rr(src.encoding(), dest.encoding());
  }
  void vmovq(FloatRegister src, Register dest) {
    masm.vmovq_rr(src.encoding(), dest.encoding());
  }
  void movq(Register src, Register dest) {
    masm.movq_rr(src.encoding(), dest.encoding());
  }
481
  // Conditional 64-bit move: dest <- src iff cond holds.
  void cmovCCq(Condition cond, const Operand& src, Register dest) {
    X86Encoding::Condition cc = static_cast<X86Encoding::Condition>(cond);
    switch (src.kind()) {
      case Operand::REG:
        masm.cmovCCq_rr(cc, src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.cmovCCq_mr(cc, src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.cmovCCq_mr(cc, src.disp(), src.base(), src.index(), src.scale(),
                        dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void cmovCCq(Condition cond, Register src, Register dest) {
    X86Encoding::Condition cc = static_cast<X86Encoding::Condition>(cond);
    masm.cmovCCq_rr(cc, src.encoding(), dest.encoding());
  }
  // Convenience wrappers for the common zero / non-zero conditions.
  void cmovzq(const Operand& src, Register dest) {
    cmovCCq(Condition::Zero, src, dest);
  }
  void cmovnzq(const Operand& src, Register dest) {
    cmovCCq(Condition::NonZero, src, dest);
  }
509
  // Atomic read-modify-write ops: emit a LOCK prefix, then the plain op.
  // T is an immediate or register source accepted by the underlying op.
  template <typename T>
  void lock_addq(T src, const Operand& op) {
    masm.prefix_lock();
    addq(src, op);
  }
  template <typename T>
  void lock_subq(T src, const Operand& op) {
    masm.prefix_lock();
    subq(src, op);
  }
  template <typename T>
  void lock_andq(T src, const Operand& op) {
    masm.prefix_lock();
    andq(src, op);
  }
  template <typename T>
  void lock_orq(T src, const Operand& op) {
    masm.prefix_lock();
    orq(src, op);
  }
  template <typename T>
  void lock_xorq(T src, const Operand& op) {
    masm.prefix_lock();
    xorq(src, op);
  }

  // LOCK cmpxchg on a 64-bit memory operand.
  void lock_cmpxchgq(Register src, const Operand& mem) {
    masm.prefix_lock();
    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
        masm.cmpxchgq(src.encoding(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
        masm.cmpxchgq(src.encoding(), mem.disp(), mem.base(), mem.index(),
                      mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
550
  // Exchange two 64-bit registers.
  void xchgq(Register src, Register dest) {
    masm.xchgq_rr(src.encoding(), dest.encoding());
  }

  // Exchange a register with a 64-bit memory operand.
  void xchgq(Register src, const Operand& mem) {
    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
        masm.xchgq_rm(src.encoding(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
        masm.xchgq_rm(src.encoding(), mem.disp(), mem.base(), mem.index(),
                      mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  // Atomic 64-bit fetch-and-add. Note: unlike the lock_* ops above there is
  // no explicit prefix_lock() here; the lock prefix is emitted by
  // lock_xaddq_rm itself.
  void lock_xaddq(Register srcdest, const Operand& mem) {
    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
        masm.lock_xaddq_rm(srcdest.encoding(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
        masm.lock_xaddq_rm(srcdest.encoding(), mem.disp(), mem.base(),
                           mem.index(), mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
582
  // Load a byte and sign-extend it to 64 bits.
  void movsbq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.movsbq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.movsbq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.movsbq_mr(src.disp(), src.base(), src.index(), src.scale(),
                       dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  // Load a byte and zero-extend it to 64 bits.
  void movzbq(const Operand& src, Register dest) {
    // movzbl zero-extends to 64 bits and is one byte smaller, so use that
    // instead.
    movzbl(src, dest);
  }

  // Load 16 bits and sign-extend to 64 bits.
  void movswq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.movswq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.movswq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.movswq_mr(src.disp(), src.base(), src.index(), src.scale(),
                       dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  // Load 16 bits and zero-extend to 64 bits.
  void movzwq(const Operand& src, Register dest) {
    // movzwl zero-extends to 64 bits and is one byte smaller, so use that
    // instead.
    movzwl(src, dest);
  }

  // Load / move 32 bits and sign-extend to 64 bits.
  void movslq(Register src, Register dest) {
    masm.movslq_rr(src.encoding(), dest.encoding());
  }
  void movslq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.movslq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.movslq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.movslq_mr(src.disp(), src.base(), src.index(), src.scale(),
                       dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
648
  // 64-bit bitwise AND, in register/immediate/memory operand forms.
  void andq(Register src, Register dest) {
    masm.andq_rr(src.encoding(), dest.encoding());
  }
  void andq(Imm32 imm, Register dest) {
    masm.andq_ir(imm.value, dest.encoding());
  }
  void andq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.andq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.andq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.andq_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.andq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void andq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.andq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.andq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.andq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                     dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
690
  // 64-bit addition, in immediate/register/memory operand forms.
  void addq(Imm32 imm, Register dest) {
    masm.addq_ir(imm.value, dest.encoding());
  }
  // Emit an addq with a full 32-bit immediate field and return its
  // CodeOffset so patchAddq can rewrite the immediate later.
  CodeOffset addqWithPatch(Imm32 imm, Register dest) {
    masm.addq_i32r(imm.value, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  void addq(Imm32 imm, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.addq_ir(imm.value, dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.addq_im(imm.value, dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.addq_im(imm.value, dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void addq(Register src, Register dest) {
    masm.addq_rr(src.encoding(), dest.encoding());
  }
  void addq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.addq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.addq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.addq_mr(src.address(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.addq_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void addq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.addq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.addq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.addq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                     dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
751
  // 64-bit subtraction, in immediate/register/memory operand forms.
  void subq(Imm32 imm, Register dest) {
    masm.subq_ir(imm.value, dest.encoding());
  }
  void subq(Register src, Register dest) {
    masm.subq_rr(src.encoding(), dest.encoding());
  }
  void subq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.subq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.subq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.subq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void subq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.subq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.subq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.subq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                     dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // 64-bit shifts and rotates, by immediate or by the cl register.
  void shlq(Imm32 imm, Register dest) {
    masm.shlq_ir(imm.value, dest.encoding());
  }
  void shrq(Imm32 imm, Register dest) {
    masm.shrq_ir(imm.value, dest.encoding());
  }
  void sarq(Imm32 imm, Register dest) {
    masm.sarq_ir(imm.value, dest.encoding());
  }
  void shlq_cl(Register dest) { masm.shlq_CLr(dest.encoding()); }
  void shrq_cl(Register dest) { masm.shrq_CLr(dest.encoding()); }
  void sarq_cl(Register dest) { masm.sarq_CLr(dest.encoding()); }
  void rolq(Imm32 imm, Register dest) {
    masm.rolq_ir(imm.value, dest.encoding());
  }
  void rolq_cl(Register dest) { masm.rolq_CLr(dest.encoding()); }
  void rorq(Imm32 imm, Register dest) {
    masm.rorq_ir(imm.value, dest.encoding());
  }
  void rorq_cl(Register dest) { masm.rorq_CLr(dest.encoding()); }
  // Bitwise OR, 64-bit operands: dest |= src.
  void orq(Imm32 imm, Register dest) {
    masm.orq_ir(imm.value, dest.encoding());
  }
  void orq(Register src, Register dest) {
    masm.orq_rr(src.encoding(), dest.encoding());
  }
  // OR from a register/memory operand into a register.
  void orq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.orq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.orq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.orq_mr(src.address(), dest.encoding());
        break;
      default:
        // NOTE(review): MEM_SCALE is not handled here, unlike the xorq
        // overload below — presumably no caller needs it; confirm before use.
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // OR a register into a register/memory destination.
  void orq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.orq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.orq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.orq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                    dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // Bitwise XOR, 64-bit operands: dest ^= src.
  void xorq(Register src, Register dest) {
    masm.xorq_rr(src.encoding(), dest.encoding());
  }
  void xorq(Imm32 imm, Register dest) {
    masm.xorq_ir(imm.value, dest.encoding());
  }
  // XOR from a register/memory operand into a register.
  void xorq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.xorq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.xorq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.xorq_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.xorq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // XOR a register into a register/memory destination.
  void xorq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.xorq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.xorq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.xorq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                     dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
887
  // Bit-scan and bit-manipulation instructions (64-bit).
  void bsrq(const Register& src, const Register& dest) {
    // Bit Scan Reverse: index of the most-significant set bit of src.
    masm.bsrq_rr(src.encoding(), dest.encoding());
  }
  void bsfq(const Register& src, const Register& dest) {
    // Bit Scan Forward: index of the least-significant set bit of src.
    masm.bsfq_rr(src.encoding(), dest.encoding());
  }
  // Byte-swap (reverse byte order) of a 64-bit register in place.
  void bswapq(const Register& reg) { masm.bswapq_r(reg.encoding()); }
  // Population count: number of set bits in src.
  void popcntq(const Register& src, const Register& dest) {
    masm.popcntq_rr(src.encoding(), dest.encoding());
  }
898
  // Signed 64-bit multiply: dest *= src.
  void imulq(Register src, Register dest) {
    masm.imulq_rr(src.encoding(), dest.encoding());
  }
  void imulq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.imulq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.imulq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        // Absolute-address source form is deliberately not implemented yet.
        MOZ_CRASH("NYI");
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
917
  // Sign-extend %rax into %rdx:%rax (conventionally used to set up idivq).
  void cqo() { masm.cqo(); }
  // Signed 64-bit division of %rdx:%rax by |divisor|.
  void idivq(Register divisor) { masm.idivq_r(divisor.encoding()); }
  // Unsigned 64-bit division of %rdx:%rax by |divisor|.
  void udivq(Register divisor) { masm.divq_r(divisor.encoding()); }
921
  // Convert a signed 64-bit integer in |src| to a double in |dest|.
  void vcvtsi2sdq(Register src, FloatRegister dest) {
    masm.vcvtsi2sdq_rr(src.encoding(), dest.encoding());
  }

  // Extract the 64-bit |lane| of an XMM register into a GPR. SSE4.1 only.
  void vpextrq(unsigned lane, FloatRegister src, Register dest) {
    MOZ_ASSERT(HasSSE41());
    masm.vpextrq_irr(lane, src.encoding(), dest.encoding());
  }

  // Insert GPR |src1| into the 64-bit |lane| of |src0|, writing the result
  // to |dest|. SSE4.1 only.
  void vpinsrq(unsigned lane, Register src1, FloatRegister src0,
               FloatRegister dest) {
    MOZ_ASSERT(HasSSE41());
    masm.vpinsrq_irr(lane, src1.encoding(), src0.encoding(), dest.encoding());
  }
936
negq(Register reg)937 void negq(Register reg) { masm.negq_r(reg.encoding()); }
938
  // Move an immediate word into |dest|.
  void mov(ImmWord word, Register dest) {
    // Use xor for setting registers to zero, as it is specially optimized
    // for this purpose on modern hardware. Note that it does clobber FLAGS
    // though. Use xorl instead of xorq since they are functionally
    // equivalent (32-bit instructions zero-extend their results to 64 bits)
    // and xorl has a smaller encoding.
    if (word.value == 0) {
      xorl(dest, dest);
    } else {
      movq(word, dest);
    }
  }
  void mov(ImmPtr imm, Register dest) { movq(imm, dest); }
  void mov(wasm::SymbolicAddress imm, Register dest) {
    // Emit a full 64-bit immediate move with a dummy value (-1), then record
    // a symbolic access at this offset so the real address can be patched in.
    masm.movq_i64r(-1, dest.encoding());
    append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
  }
  void mov(const Operand& src, Register dest) { movq(src, dest); }
  void mov(Register src, const Operand& dest) { movq(src, dest); }
  void mov(Imm32 imm32, const Operand& dest) { movq(imm32, dest); }
  void mov(Register src, Register dest) { movq(src, dest); }
  void mov(CodeLabel* label, Register dest) {
    // Emit a zero placeholder immediate and bind the label's patch site to
    // this instruction so the actual value can be written in later.
    masm.movq_i64r(/* placeholder */ 0, dest.encoding());
    label->patchAt()->bind(masm.size());
  }
  // Exchange the contents of two 64-bit registers.
  void xchg(Register src, Register dest) { xchgq(src, dest); }
965
lea(const Operand & src,Register dest)966 void lea(const Operand& src, Register dest) {
967 switch (src.kind()) {
968 case Operand::MEM_REG_DISP:
969 masm.leaq_mr(src.disp(), src.base(), dest.encoding());
970 break;
971 case Operand::MEM_SCALE:
972 masm.leaq_mr(src.disp(), src.base(), src.index(), src.scale(),
973 dest.encoding());
974 break;
975 default:
976 MOZ_CRASH("unexepcted operand kind");
977 }
978 }
979
  // Conditional moves performed when the Z flag is set: 32-bit and
  // pointer-width (64-bit on x64) variants, forwarding to cmovzl/cmovzq.
  void cmovz32(const Operand& src, Register dest) { return cmovzl(src, dest); }
  void cmovzPtr(const Operand& src, Register dest) { return cmovzq(src, dest); }
982
  // RIP-relative loads, stores, and lea. Each helper emits the instruction
  // with a RIP-relative memory operand and returns the CodeOffset of the
  // emitted instruction (from the underlying *_ripr/*_rrip emitter) —
  // presumably so callers can patch the displacement; verify against usage.
  CodeOffset loadRipRelativeInt32(Register dest) {
    return CodeOffset(masm.movl_ripr(dest.encoding()).offset());
  }
  CodeOffset loadRipRelativeInt64(Register dest) {
    return CodeOffset(masm.movq_ripr(dest.encoding()).offset());
  }
  CodeOffset loadRipRelativeDouble(FloatRegister dest) {
    return CodeOffset(masm.vmovsd_ripr(dest.encoding()).offset());
  }
  CodeOffset loadRipRelativeFloat32(FloatRegister dest) {
    return CodeOffset(masm.vmovss_ripr(dest.encoding()).offset());
  }
  // Vector loads: vmovdqa/vmovaps are aligned 128-bit moves.
  CodeOffset loadRipRelativeInt32x4(FloatRegister dest) {
    return CodeOffset(masm.vmovdqa_ripr(dest.encoding()).offset());
  }
  CodeOffset loadRipRelativeFloat32x4(FloatRegister dest) {
    return CodeOffset(masm.vmovaps_ripr(dest.encoding()).offset());
  }
  CodeOffset storeRipRelativeInt32(Register dest) {
    return CodeOffset(masm.movl_rrip(dest.encoding()).offset());
  }
  CodeOffset storeRipRelativeInt64(Register dest) {
    return CodeOffset(masm.movq_rrip(dest.encoding()).offset());
  }
  CodeOffset storeRipRelativeDouble(FloatRegister dest) {
    return CodeOffset(masm.vmovsd_rrip(dest.encoding()).offset());
  }
  CodeOffset storeRipRelativeFloat32(FloatRegister dest) {
    return CodeOffset(masm.vmovss_rrip(dest.encoding()).offset());
  }
  CodeOffset storeRipRelativeInt32x4(FloatRegister dest) {
    return CodeOffset(masm.vmovdqa_rrip(dest.encoding()).offset());
  }
  CodeOffset storeRipRelativeFloat32x4(FloatRegister dest) {
    return CodeOffset(masm.vmovaps_rrip(dest.encoding()).offset());
  }
  // Load the address of a RIP-relative location into |dest|.
  CodeOffset leaRipRelative(Register dest) {
    return CodeOffset(masm.leaq_rip(dest.encoding()).offset());
  }
1022
  // 64-bit compare: sets FLAGS from lhs - rhs without storing a result.
  // Note the (rhs, lhs) parameter order used by these wrappers.
  void cmpq(Register rhs, Register lhs) {
    masm.cmpq_rr(rhs.encoding(), lhs.encoding());
  }
  void cmpq(Register rhs, const Operand& lhs) {
    switch (lhs.kind()) {
      case Operand::REG:
        masm.cmpq_rr(rhs.encoding(), lhs.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.cmpq_rm(rhs.encoding(), lhs.disp(), lhs.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.cmpq_rm(rhs.encoding(), lhs.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void cmpq(Imm32 rhs, Register lhs) {
    masm.cmpq_ir(rhs.value, lhs.encoding());
  }
  // Immediate compare against a register/memory operand; this overload also
  // supports scaled-index addressing.
  void cmpq(Imm32 rhs, const Operand& lhs) {
    switch (lhs.kind()) {
      case Operand::REG:
        masm.cmpq_ir(rhs.value, lhs.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.cmpq_im(rhs.value, lhs.disp(), lhs.base());
        break;
      case Operand::MEM_SCALE:
        masm.cmpq_im(rhs.value, lhs.disp(), lhs.base(), lhs.index(),
                     lhs.scale());
        break;
      case Operand::MEM_ADDRESS32:
        masm.cmpq_im(rhs.value, lhs.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void cmpq(const Operand& rhs, Register lhs) {
    switch (rhs.kind()) {
      case Operand::REG:
        masm.cmpq_rr(rhs.reg(), lhs.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.cmpq_mr(rhs.disp(), rhs.base(), lhs.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
1075
  // 64-bit TEST: sets FLAGS from the bitwise AND of the operands without
  // storing a result.
  void testq(Imm32 rhs, Register lhs) {
    masm.testq_ir(rhs.value, lhs.encoding());
  }
  void testq(Register rhs, Register lhs) {
    masm.testq_rr(rhs.encoding(), lhs.encoding());
  }
  void testq(Imm32 rhs, const Operand& lhs) {
    switch (lhs.kind()) {
      case Operand::REG:
        masm.testq_ir(rhs.value, lhs.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.testq_i32m(rhs.value, lhs.disp(), lhs.base());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
        break;
    }
  }
1095
  // Unconditional jump to an absolute target; the jump source is recorded
  // via addPendingJump together with its relocation kind.
  void jmp(ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
    JmpSrc src = masm.jmp();
    addPendingJump(src, target, reloc);
  }
  // Conditional jump (jCC) to an absolute target.
  void j(Condition cond, ImmPtr target,
         RelocationKind reloc = RelocationKind::HARDCODED) {
    JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
    addPendingJump(src, target, reloc);
  }
1105
  // Jump/branch into another JitCode; uses RelocationKind::JITCODE so the
  // reference is recorded as a JIT-code relocation.
  void jmp(JitCode* target) {
    jmp(ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
  void j(Condition cond, JitCode* target) {
    j(cond, ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
  // Call into another JitCode.
  void call(JitCode* target) {
    JmpSrc src = masm.call();
    addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
  }
  // Call an absolute address (hard-coded relocation).
  void call(ImmWord target) { call(ImmPtr((void*)target.value)); }
  void call(ImmPtr target) {
    JmpSrc src = masm.call();
    addPendingJump(src, target, RelocationKind::HARDCODED);
  }
1121
  // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
  // this instruction.
  CodeOffset toggledCall(JitCode* target, bool enabled) {
    CodeOffset offset(size());
    // When disabled, emit cmp_eax instead of the call; the assert below
    // checks both forms occupy exactly ToggledCallSize bytes so they can be
    // toggled in place.
    JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
    addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
    MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
    return offset;
  }

  // Number of bytes a toggled call occupies; |code| is unused on x64.
  static size_t ToggledCallSize(uint8_t* code) {
    // Size of a call instruction.
    return 5;
  }
1136
1137 // Do not mask shared implementations.
1138 using AssemblerX86Shared::call;
1139
  // Scalar float <-> 64-bit integer conversions. The vcvtt* forms are the
  // truncating (round-toward-zero) conversions.
  void vcvttsd2sq(FloatRegister src, Register dest) {
    // double -> int64, truncating.
    masm.vcvttsd2sq_rr(src.encoding(), dest.encoding());
  }
  void vcvttss2sq(FloatRegister src, Register dest) {
    // float32 -> int64, truncating.
    masm.vcvttss2sq_rr(src.encoding(), dest.encoding());
  }
  void vcvtsq2sd(Register src1, FloatRegister src0, FloatRegister dest) {
    // int64 -> double (three-operand AVX form; src0 supplies upper bits).
    masm.vcvtsq2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
  }
  void vcvtsq2ss(Register src1, FloatRegister src0, FloatRegister dest) {
    // int64 -> float32 (three-operand AVX form; src0 supplies upper bits).
    masm.vcvtsq2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
  }
1152 };
1153
GetIntArgReg(uint32_t intArg,uint32_t floatArg,Register * out)1154 static inline bool GetIntArgReg(uint32_t intArg, uint32_t floatArg,
1155 Register* out) {
1156 #if defined(_WIN64)
1157 uint32_t arg = intArg + floatArg;
1158 #else
1159 uint32_t arg = intArg;
1160 #endif
1161 if (arg >= NumIntArgRegs) {
1162 return false;
1163 }
1164 *out = IntArgRegs[arg];
1165 return true;
1166 }
1167
1168 // Get a register in which we plan to put a quantity that will be used as an
1169 // integer argument. This differs from GetIntArgReg in that if we have no more
1170 // actual argument registers to use we will fall back on using whatever
1171 // CallTempReg* don't overlap the argument registers, and only fail once those
1172 // run out too.
GetTempRegForIntArg(uint32_t usedIntArgs,uint32_t usedFloatArgs,Register * out)1173 static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
1174 uint32_t usedFloatArgs, Register* out) {
1175 if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) {
1176 return true;
1177 }
1178 // Unfortunately, we have to assume things about the point at which
1179 // GetIntArgReg returns false, because we need to know how many registers it
1180 // can allocate.
1181 #if defined(_WIN64)
1182 uint32_t arg = usedIntArgs + usedFloatArgs;
1183 #else
1184 uint32_t arg = usedIntArgs;
1185 #endif
1186 arg -= NumIntArgRegs;
1187 if (arg >= NumCallTempNonArgRegs) {
1188 return false;
1189 }
1190 *out = CallTempNonArgRegs[arg];
1191 return true;
1192 }
1193
GetFloatArgReg(uint32_t intArg,uint32_t floatArg,FloatRegister * out)1194 static inline bool GetFloatArgReg(uint32_t intArg, uint32_t floatArg,
1195 FloatRegister* out) {
1196 #if defined(_WIN64)
1197 uint32_t arg = intArg + floatArg;
1198 #else
1199 uint32_t arg = floatArg;
1200 #endif
1201 if (floatArg >= NumFloatArgRegs) {
1202 return false;
1203 }
1204 *out = FloatArgRegs[arg];
1205 return true;
1206 }
1207
1208 } // namespace jit
1209 } // namespace js
1210
1211 #endif /* jit_x64_Assembler_x64_h */
1212