1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #ifndef jit_x64_Assembler_x64_h
8 #define jit_x64_Assembler_x64_h
9
10 #include "mozilla/ArrayUtils.h"
11
12 #include "jit/IonCode.h"
13 #include "jit/JitCompartment.h"
14 #include "jit/shared/Assembler-shared.h"
15
16 namespace js {
17 namespace jit {
18
// The sixteen x64 general-purpose registers, bound to their hardware
// encodings.
static constexpr Register rax{X86Encoding::rax};
static constexpr Register rbx{X86Encoding::rbx};
static constexpr Register rcx{X86Encoding::rcx};
static constexpr Register rdx{X86Encoding::rdx};
static constexpr Register rsi{X86Encoding::rsi};
static constexpr Register rdi{X86Encoding::rdi};
static constexpr Register rbp{X86Encoding::rbp};
static constexpr Register r8{X86Encoding::r8};
static constexpr Register r9{X86Encoding::r9};
static constexpr Register r10{X86Encoding::r10};
static constexpr Register r11{X86Encoding::r11};
static constexpr Register r12{X86Encoding::r12};
static constexpr Register r13{X86Encoding::r13};
static constexpr Register r14{X86Encoding::r14};
static constexpr Register r15{X86Encoding::r15};
static constexpr Register rsp{X86Encoding::rsp};

// The sixteen SSE registers, declared here with double-precision content
// type.
static constexpr FloatRegister xmm0 =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static constexpr FloatRegister xmm1 =
    FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
static constexpr FloatRegister xmm2 =
    FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
static constexpr FloatRegister xmm3 =
    FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
static constexpr FloatRegister xmm4 =
    FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
static constexpr FloatRegister xmm5 =
    FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
static constexpr FloatRegister xmm6 =
    FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
static constexpr FloatRegister xmm7 =
    FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
static constexpr FloatRegister xmm8 =
    FloatRegister(X86Encoding::xmm8, FloatRegisters::Double);
static constexpr FloatRegister xmm9 =
    FloatRegister(X86Encoding::xmm9, FloatRegisters::Double);
static constexpr FloatRegister xmm10 =
    FloatRegister(X86Encoding::xmm10, FloatRegisters::Double);
static constexpr FloatRegister xmm11 =
    FloatRegister(X86Encoding::xmm11, FloatRegisters::Double);
static constexpr FloatRegister xmm12 =
    FloatRegister(X86Encoding::xmm12, FloatRegisters::Double);
static constexpr FloatRegister xmm13 =
    FloatRegister(X86Encoding::xmm13, FloatRegisters::Double);
static constexpr FloatRegister xmm14 =
    FloatRegister(X86Encoding::xmm14, FloatRegisters::Double);
static constexpr FloatRegister xmm15 =
    FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);

// X86-common synonyms. On x64 these alias the full 64-bit registers; they
// exist so code shared with x86 can use the 32-bit names.
static constexpr Register eax = rax;
static constexpr Register ebx = rbx;
static constexpr Register ecx = rcx;
static constexpr Register edx = rdx;
static constexpr Register esi = rsi;
static constexpr Register edi = rdi;
static constexpr Register ebp = rbp;
static constexpr Register esp = rsp;

static constexpr Register InvalidReg{X86Encoding::invalid_reg};
static constexpr FloatRegister InvalidFloatReg = FloatRegister();

static constexpr Register StackPointer = rsp;
static constexpr Register FramePointer = rbp;
static constexpr Register JSReturnReg = rcx;
// Avoid, except for assertions.
static constexpr Register JSReturnReg_Type = JSReturnReg;
static constexpr Register JSReturnReg_Data = JSReturnReg;

// r11 is reserved as the MacroAssembler's scratch register; see
// ScratchRegisterScope below.
static constexpr Register ScratchReg = r11;
90
// Helper class for ScratchRegister usage. Asserts that only one piece
// of code thinks it has exclusive ownership of the scratch register.
struct ScratchRegisterScope : public AutoRegisterScope {
  explicit ScratchRegisterScope(MacroAssembler& masm)
      : AutoRegisterScope(masm, ScratchReg) {}
};
97
static constexpr Register ReturnReg = rax;
static constexpr Register HeapReg = r15;
static constexpr Register64 ReturnReg64(rax);
// Return and scratch float registers: xmm0 holds return values in all three
// content-type views; xmm15 is the dedicated float scratch register.
static constexpr FloatRegister ReturnFloat32Reg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
static constexpr FloatRegister ReturnDoubleReg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static constexpr FloatRegister ReturnSimd128Reg =
    FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
static constexpr FloatRegister ScratchFloat32Reg =
    FloatRegister(X86Encoding::xmm15, FloatRegisters::Single);
static constexpr FloatRegister ScratchDoubleReg =
    FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);
static constexpr FloatRegister ScratchSimd128Reg = xmm15;

// Avoid rbp, which is the FramePointer, which is unavailable in some modes.
static constexpr Register CallTempReg0 = rax;
static constexpr Register CallTempReg1 = rdi;
static constexpr Register CallTempReg2 = rbx;
static constexpr Register CallTempReg3 = rcx;
static constexpr Register CallTempReg4 = rsi;
static constexpr Register CallTempReg5 = rdx;

// Different argument registers for WIN64
#if defined(_WIN64)
static constexpr Register IntArgReg0 = rcx;
static constexpr Register IntArgReg1 = rdx;
static constexpr Register IntArgReg2 = r8;
static constexpr Register IntArgReg3 = r9;
static constexpr uint32_t NumIntArgRegs = 4;
// Use "const" instead of constexpr here to work around a bug
// of VS2015 Update 1. See bug 1229604.
static const Register IntArgRegs[NumIntArgRegs] = {rcx, rdx, r8, r9};

static const Register CallTempNonArgRegs[] = {rax, rdi, rbx, rsi};
static const uint32_t NumCallTempNonArgRegs =
    mozilla::ArrayLength(CallTempNonArgRegs);

static constexpr FloatRegister FloatArgReg0 = xmm0;
static constexpr FloatRegister FloatArgReg1 = xmm1;
static constexpr FloatRegister FloatArgReg2 = xmm2;
static constexpr FloatRegister FloatArgReg3 = xmm3;
static const uint32_t NumFloatArgRegs = 4;
static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = {xmm0, xmm1,
                                                                xmm2, xmm3};
#else
// System V AMD64 ABI: six integer and eight float argument registers.
static constexpr Register IntArgReg0 = rdi;
static constexpr Register IntArgReg1 = rsi;
static constexpr Register IntArgReg2 = rdx;
static constexpr Register IntArgReg3 = rcx;
static constexpr Register IntArgReg4 = r8;
static constexpr Register IntArgReg5 = r9;
static constexpr uint32_t NumIntArgRegs = 6;
static const Register IntArgRegs[NumIntArgRegs] = {rdi, rsi, rdx, rcx, r8, r9};

// Use "const" instead of constexpr here to work around a bug
// of VS2015 Update 1. See bug 1229604.
static const Register CallTempNonArgRegs[] = {rax, rbx};
static const uint32_t NumCallTempNonArgRegs =
    mozilla::ArrayLength(CallTempNonArgRegs);

static constexpr FloatRegister FloatArgReg0 = xmm0;
static constexpr FloatRegister FloatArgReg1 = xmm1;
static constexpr FloatRegister FloatArgReg2 = xmm2;
static constexpr FloatRegister FloatArgReg3 = xmm3;
static constexpr FloatRegister FloatArgReg4 = xmm4;
static constexpr FloatRegister FloatArgReg5 = xmm5;
static constexpr FloatRegister FloatArgReg6 = xmm6;
static constexpr FloatRegister FloatArgReg7 = xmm7;
static constexpr uint32_t NumFloatArgRegs = 8;
static constexpr FloatRegister FloatArgRegs[NumFloatArgRegs] = {
    xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7};
#endif

// Registers used in the GenerateFFIIonExit Enable Activation block.
static constexpr Register WasmIonExitRegCallee = r10;
static constexpr Register WasmIonExitRegE0 = rax;
static constexpr Register WasmIonExitRegE1 = rdi;

// Registers used in the GenerateFFIIonExit Disable Activation block.
static constexpr Register WasmIonExitRegReturnData = ecx;
static constexpr Register WasmIonExitRegReturnType = ecx;
static constexpr Register WasmIonExitTlsReg = r14;
static constexpr Register WasmIonExitRegD0 = rax;
static constexpr Register WasmIonExitRegD1 = rdi;
static constexpr Register WasmIonExitRegD2 = rbx;

// Registers used in RegExpMatcher instruction (do not use JSReturnOperand).
static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
static constexpr Register RegExpMatcherStringReg = CallTempReg1;
static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;

// Registers used in RegExpTester instruction (do not use ReturnReg).
static constexpr Register RegExpTesterRegExpReg = CallTempReg1;
static constexpr Register RegExpTesterStringReg = CallTempReg2;
static constexpr Register RegExpTesterLastIndexReg = CallTempReg3;
194
// Assigns an ABI location (register or stack slot) to each argument of a
// native call, in declaration order. On Windows a single index covers both
// integer and float argument registers; elsewhere the two are tracked
// independently.
class ABIArgGenerator {
#if defined(XP_WIN)
  unsigned regIndex_;
#else
  unsigned intRegIndex_;
  unsigned floatRegIndex_;
#endif
  uint32_t stackOffset_;
  ABIArg current_;

 public:
  ABIArgGenerator();
  // Computes and returns the location of the next argument of |argType|.
  ABIArg next(MIRType argType);
  // The location most recently returned by next().
  ABIArg& current() { return current_; }
  // Bytes of stack space consumed by stack-passed arguments so far.
  uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
};
211
// These registers may be volatile or nonvolatile.
// Avoid r11, which is the MacroAssembler's ScratchReg.
static constexpr Register ABINonArgReg0 = rax;
static constexpr Register ABINonArgReg1 = rbx;
static constexpr Register ABINonArgReg2 = r10;

// This register may be volatile or nonvolatile. Avoid xmm15 which is the
// ScratchDoubleReg.
static constexpr FloatRegister ABINonArgDoubleReg =
    FloatRegister(X86Encoding::xmm8, FloatRegisters::Double);

// These registers may be volatile or nonvolatile.
// Note: these three registers are all guaranteed to be different.
static constexpr Register ABINonArgReturnReg0 = r10;
static constexpr Register ABINonArgReturnReg1 = r12;
static constexpr Register ABINonVolatileReg = r13;

// This register is guaranteed to be clobberable during the prologue and
// epilogue of an ABI call which must preserve both ABI argument, return
// and non-volatile registers.
static constexpr Register ABINonArgReturnVolatileReg = r10;

// TLS pointer argument register for WebAssembly functions. This must not alias
// any other register used for passing function arguments or return values.
// Preserved by WebAssembly functions.
static constexpr Register WasmTlsReg = r14;

// Registers used for asm.js/wasm table calls. These registers must be disjoint
// from the ABI argument registers, WasmTlsReg and each other.
static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;

static constexpr Register OsrFrameReg = IntArgReg3;

static constexpr Register PreBarrierReg = rdx;

// Stack and code alignment requirements for this platform.
static constexpr uint32_t ABIStackAlignment = 16;
static constexpr uint32_t CodeAlignment = 16;
static constexpr uint32_t JitStackAlignment = 16;

// Number of Values per JIT stack alignment unit.
static constexpr uint32_t JitStackValueAlignment =
    JitStackAlignment / sizeof(Value);
static_assert(JitStackAlignment % sizeof(Value) == 0 &&
                  JitStackValueAlignment >= 1,
              "Stack alignment should be a non-zero multiple of sizeof(Value)");

// This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is
// here such that it is accessible from the entire codebase. Once full support
// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
static constexpr bool SupportsSimd = true;
static constexpr uint32_t SimdMemoryAlignment = 16;

static_assert(CodeAlignment % SimdMemoryAlignment == 0,
              "Code alignment should be larger than any of the alignments "
              "which are used for "
              "the constant sections of the code buffer. Thus it should be "
              "larger than the "
              "alignment for SIMD constants.");

static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
              "Stack alignment should be larger than any of the alignments "
              "which are used for "
              "spilled values. Thus it should be larger than the alignment "
              "for SIMD accesses.");

static const uint32_t WasmStackAlignment = SimdMemoryAlignment;

// Pointers are eight bytes wide on x64, so scaled addressing of pointer
// arrays uses TimesEight.
static const Scale ScalePointer = TimesEight;
282
283 } // namespace jit
284 } // namespace js
285
286 #include "jit/x86-shared/Assembler-x86-shared.h"
287
288 namespace js {
289 namespace jit {
290
// Return operand from a JS -> JS call. On x64 a Value fits in one register.
static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
293
class Assembler : public AssemblerX86Shared {
  // x64 jumps may need extra bits of relocation, because a jump may extend
  // beyond the signed 32-bit range. To account for this we add an extended
  // jump table at the bottom of the instruction stream, and if a jump
  // overflows its range, it will redirect here.
  //
  // In our relocation table, we store two offsets instead of one: the offset
  // to the original jump, and an offset to the extended jump if we will need
  // to use it instead. The offsets are stored as:
  //    [unsigned] Unsigned offset to short jump, from the start of the code.
  //    [unsigned] Unsigned offset to the extended jump, from the start of
  //               the jump table, in units of SizeOfJumpTableEntry.
  //
  // The start of the relocation table contains the offset from the code
  // buffer to the start of the extended jump table.
  //
  // Each entry in this table is a jmp [rip], followed by a ud2 to hint to the
  // hardware branch predictor that there is no fallthrough, followed by the
  // eight bytes containing an immediate address. This comes out to 16 bytes.
  //    +1 byte for opcode
  //    +1 byte for mod r/m
  //    +4 bytes for rip-relative offset
  //    +2 bytes for ud2 instruction
  //    +8 bytes for 64-bit address
  //
  static const uint32_t SizeOfExtendedJump = 1 + 1 + 4 + 2 + 8;
  static const uint32_t SizeOfJumpTableEntry = 16;

  // Offset of the extended jump table in the code buffer; zero until set
  // (initialized to 0 in the constructor below).
  uint32_t extendedJumpTable_;

  static JitCode* CodeFromJump(JitCode* code, uint8_t* jump);

 private:
  void writeRelocation(JmpSrc src, Relocation::Kind reloc);
  void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc);

 protected:
  size_t addPatchableJump(JmpSrc src, Relocation::Kind reloc);

 public:
  // Pull in the shared-x86 overloads alongside the x64-specific ones below.
  using AssemblerX86Shared::j;
  using AssemblerX86Shared::jmp;
  using AssemblerX86Shared::pop;
  using AssemblerX86Shared::push;
  using AssemblerX86Shared::vmovq;

  static uint8_t* PatchableJumpAddress(JitCode* code, size_t index);
  static void PatchJumpEntry(uint8_t* entry, uint8_t* target,
                             ReprotectCode reprotect);

  Assembler() : extendedJumpTable_(0) {}

  static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                   CompactBufferReader& reader);

  // The buffer is about to be linked, make sure any constant pools or excess
  // bookkeeping has been flushed to the instruction stream.
  void finish();

  // Copy the assembly code to the given buffer, and perform any pending
  // relocations relying on the target address.
  void executableCopy(uint8_t* buffer, bool flushICache = true);

  // Actual assembly emitting functions.
358
  // Push a GC-thing pointer. Materialized through the scratch register so
  // that movq(ImmGCPtr, ...) can record a data relocation for the immediate.
  void push(const ImmGCPtr ptr) {
    movq(ptr, ScratchReg);
    push(ScratchReg);
  }
push(const ImmWord ptr)363 void push(const ImmWord ptr) {
364 // We often end up with ImmWords that actually fit into int32.
365 // Be aware of the sign extension behavior.
366 if (ptr.value <= INT32_MAX) {
367 push(Imm32(ptr.value));
368 } else {
369 movq(ptr, ScratchReg);
370 push(ScratchReg);
371 }
372 }
  void push(ImmPtr imm) { push(ImmWord(uintptr_t(imm.value))); }
  // Push a double by manually bumping rsp; there is no float push opcode.
  void push(FloatRegister src) {
    subq(Imm32(sizeof(double)), StackPointer);
    vmovsd(src, Address(StackPointer, 0));
  }
  // Push a 64-bit immediate whose value will be patched later; returns the
  // offset of the patchable movabs.
  CodeOffset pushWithPatch(ImmWord word) {
    CodeOffset label = movWithPatch(word, ScratchReg);
    push(ScratchReg);
    return label;
  }

  // Counterpart of push(FloatRegister): load then free the stack slot.
  void pop(FloatRegister src) {
    vmovsd(Address(StackPointer, 0), src);
    addq(Imm32(sizeof(double)), StackPointer);
  }

  // Emit a full 64-bit movabs (never the shorter forms) so the immediate
  // field can be patched afterwards; returns its code offset.
  CodeOffset movWithPatch(ImmWord word, Register dest) {
    masm.movq_i64r(word.value, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  CodeOffset movWithPatch(ImmPtr imm, Register dest) {
    return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
  }

  // This is for patching during code generation, not after.
  void patchAddq(CodeOffset offset, int32_t n) {
    unsigned char* code = masm.data();
    X86Encoding::SetInt32(code + offset.offset(), n);
  }
402
  // Load an ImmWord value into a register. Note that this instruction will
  // attempt to optimize its immediate field size. When a full 64-bit
  // immediate is needed for a relocation, use movWithPatch.
  void movq(ImmWord word, Register dest) {
    // Load a 64-bit immediate into a register. If the value falls into
    // certain ranges, we can use specialized instructions which have
    // smaller encodings.
    if (word.value <= UINT32_MAX) {
      // movl has a 32-bit unsigned (effectively) immediate field.
      masm.movl_i32r((uint32_t)word.value, dest.encoding());
    } else if ((intptr_t)word.value >= INT32_MIN &&
               (intptr_t)word.value <= INT32_MAX) {
      // movq has a 32-bit signed immediate field.
      masm.movq_i32r((int32_t)(intptr_t)word.value, dest.encoding());
    } else {
      // Otherwise use movabs.
      masm.movq_i64r(word.value, dest.encoding());
    }
  }
  void movq(ImmPtr imm, Register dest) {
    movq(ImmWord(uintptr_t(imm.value)), dest);
  }
  // GC pointers always use the full movabs form, and a data relocation is
  // recorded for the 64-bit immediate.
  void movq(ImmGCPtr ptr, Register dest) {
    masm.movq_i64r(uintptr_t(ptr.value), dest.encoding());
    writeDataRelocation(ptr);
  }
  // 64-bit load: dispatch on the operand's addressing mode.
  void movq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.movq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.movq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.movq_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // 64-bit store: dispatch on the destination's addressing mode.
  void movq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.movq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.movq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.movq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                     dest.scale());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movq_rm(src.encoding(), dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // Store a 32-bit immediate to a 64-bit destination. For memory operands
  // this is a movq with a sign-extended imm32; for a register it uses movl.
  void movq(Imm32 imm32, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.movl_i32r(imm32.value, dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.movq_i32m(imm32.value, dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.movq_i32m(imm32.value, dest.disp(), dest.base(), dest.index(),
                       dest.scale());
        break;
      case Operand::MEM_ADDRESS32:
        masm.movq_i32m(imm32.value, dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // Bitwise moves between a GPR and an XMM register.
  void vmovq(Register src, FloatRegister dest) {
    masm.vmovq_rr(src.encoding(), dest.encoding());
  }
  void vmovq(FloatRegister src, Register dest) {
    masm.vmovq_rr(src.encoding(), dest.encoding());
  }
  void movq(Register src, Register dest) {
    masm.movq_rr(src.encoding(), dest.encoding());
  }
495
  // 64-bit conditional move: dest = src if |cond| holds, based on the
  // current flags. Note: no MEM_ADDRESS32 form is supported here.
  void cmovCCq(Condition cond, const Operand& src, Register dest) {
    X86Encoding::Condition cc = static_cast<X86Encoding::Condition>(cond);
    switch (src.kind()) {
      case Operand::REG:
        masm.cmovCCq_rr(cc, src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.cmovCCq_mr(cc, src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.cmovCCq_mr(cc, src.disp(), src.base(), src.index(), src.scale(),
                        dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // Convenience wrappers for the two most common conditions.
  void cmovzq(const Operand& src, Register dest) {
    cmovCCq(Condition::Zero, src, dest);
  }
  void cmovnzq(const Operand& src, Register dest) {
    cmovCCq(Condition::NonZero, src, dest);
  }
519
  // Atomic read-modify-write forms: emit a LOCK prefix, then the plain
  // 64-bit ALU instruction. T is any src type accepted by the wrapped op.
  template <typename T>
  void lock_addq(T src, const Operand& op) {
    masm.prefix_lock();
    addq(src, op);
  }
  template <typename T>
  void lock_subq(T src, const Operand& op) {
    masm.prefix_lock();
    subq(src, op);
  }
  template <typename T>
  void lock_andq(T src, const Operand& op) {
    masm.prefix_lock();
    andq(src, op);
  }
  template <typename T>
  void lock_orq(T src, const Operand& op) {
    masm.prefix_lock();
    orq(src, op);
  }
  template <typename T>
  void lock_xorq(T src, const Operand& op) {
    masm.prefix_lock();
    xorq(src, op);
  }
545
  // Atomic 64-bit compare-and-exchange against memory (LOCK CMPXCHG).
  // Implicitly compares with and updates rax, per the instruction's contract.
  void lock_cmpxchgq(Register src, const Operand& mem) {
    masm.prefix_lock();
    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
        masm.cmpxchgq(src.encoding(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
        masm.cmpxchgq(src.encoding(), mem.disp(), mem.base(), mem.index(),
                      mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  void xchgq(Register src, Register dest) {
    masm.xchgq_rr(src.encoding(), dest.encoding());
  }

  // Register-memory exchange. XCHG with a memory operand is implicitly
  // locked by the hardware, so no explicit LOCK prefix is emitted.
  void xchgq(Register src, const Operand& mem) {
    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
        masm.xchgq_rm(src.encoding(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
        masm.xchgq_rm(src.encoding(), mem.disp(), mem.base(), mem.index(),
                      mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  // Atomic fetch-and-add. NOTE(review): unlike lock_cmpxchgq above there is
  // no masm.prefix_lock() call here; presumably lock_xaddq_rm emits the LOCK
  // prefix itself — confirm against the encoder.
  void lock_xaddq(Register srcdest, const Operand& mem) {
    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
        masm.lock_xaddq_rm(srcdest.encoding(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
        masm.lock_xaddq_rm(srcdest.encoding(), mem.disp(), mem.base(),
                          mem.index(), mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
592
  // Sign-extending byte-to-quad move.
  void movsbq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.movsbq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.movsbq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.movsbq_mr(src.disp(), src.base(), src.index(), src.scale(),
                       dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
609
movzbq(const Operand & src,Register dest)610 void movzbq(const Operand& src, Register dest) {
611 // movzbl zero-extends to 64 bits and is one byte smaller, so use that
612 // instead.
613 movzbl(src, dest);
614 }
615
  // Sign-extending word-to-quad move.
  void movswq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.movswq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.movswq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.movswq_mr(src.disp(), src.base(), src.index(), src.scale(),
                       dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }

  void movzwq(const Operand& src, Register dest) {
    // movzwl zero-extends to 64 bits and is one byte smaller, so use that
    // instead.
    movzwl(src, dest);
  }

  // Sign-extending dword-to-quad moves (movsxd).
  void movslq(Register src, Register dest) {
    masm.movslq_rr(src.encoding(), dest.encoding());
  }
  void movslq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.movslq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.movslq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.movslq_mr(src.disp(), src.base(), src.index(), src.scale(),
                       dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
658
  // 64-bit bitwise AND, in register-register, immediate, load, and store
  // forms; memory forms dispatch on the operand's addressing mode.
  void andq(Register src, Register dest) {
    masm.andq_rr(src.encoding(), dest.encoding());
  }
  void andq(Imm32 imm, Register dest) {
    masm.andq_ir(imm.value, dest.encoding());
  }
  void andq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.andq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.andq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.andq_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.andq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void andq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.andq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.andq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.andq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                     dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
700
  // 64-bit add in immediate, register, load, and store forms.
  void addq(Imm32 imm, Register dest) {
    masm.addq_ir(imm.value, dest.encoding());
  }
  // Emit an add with a full 32-bit immediate field (never a shorter
  // encoding) so the immediate can be patched later via patchAddq; returns
  // its code offset.
  CodeOffset addqWithPatch(Imm32 imm, Register dest) {
    masm.addq_i32r(imm.value, dest.encoding());
    return CodeOffset(masm.currentOffset());
  }
  // Note: no MEM_SCALE case here, unlike the register-source form below.
  void addq(Imm32 imm, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.addq_ir(imm.value, dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.addq_im(imm.value, dest.disp(), dest.base());
        break;
      case Operand::MEM_ADDRESS32:
        masm.addq_im(imm.value, dest.address());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void addq(Register src, Register dest) {
    masm.addq_rr(src.encoding(), dest.encoding());
  }
  void addq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.addq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.addq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.addq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void addq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.addq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.addq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.addq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                     dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
757
  // 64-bit subtract in immediate, register, load, and store forms.
  void subq(Imm32 imm, Register dest) {
    masm.subq_ir(imm.value, dest.encoding());
  }
  void subq(Register src, Register dest) {
    masm.subq_rr(src.encoding(), dest.encoding());
  }
  void subq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.subq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.subq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.subq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void subq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.subq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.subq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.subq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                     dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // 64-bit shifts and rotates: by immediate count, or by the count in cl.
  void shlq(Imm32 imm, Register dest) {
    masm.shlq_ir(imm.value, dest.encoding());
  }
  void shrq(Imm32 imm, Register dest) {
    masm.shrq_ir(imm.value, dest.encoding());
  }
  void sarq(Imm32 imm, Register dest) {
    masm.sarq_ir(imm.value, dest.encoding());
  }
  void shlq_cl(Register dest) { masm.shlq_CLr(dest.encoding()); }
  void shrq_cl(Register dest) { masm.shrq_CLr(dest.encoding()); }
  void sarq_cl(Register dest) { masm.sarq_CLr(dest.encoding()); }
  void rolq(Imm32 imm, Register dest) {
    masm.rolq_ir(imm.value, dest.encoding());
  }
  void rolq_cl(Register dest) { masm.rolq_CLr(dest.encoding()); }
  void rorq(Imm32 imm, Register dest) {
    masm.rorq_ir(imm.value, dest.encoding());
  }
  void rorq_cl(Register dest) { masm.rorq_CLr(dest.encoding()); }
  // 64-bit bitwise OR in immediate, register, load, and store forms.
  void orq(Imm32 imm, Register dest) {
    masm.orq_ir(imm.value, dest.encoding());
  }
  void orq(Register src, Register dest) {
    masm.orq_rr(src.encoding(), dest.encoding());
  }
  void orq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.orq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.orq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.orq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  void orq(Register src, const Operand& dest) {
    switch (dest.kind()) {
      case Operand::REG:
        masm.orq_rr(src.encoding(), dest.reg());
        break;
      case Operand::MEM_REG_DISP:
        masm.orq_rm(src.encoding(), dest.disp(), dest.base());
        break;
      case Operand::MEM_SCALE:
        masm.orq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
                    dest.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
  // 64-bit bitwise XOR in register, immediate, and load forms.
  void xorq(Register src, Register dest) {
    masm.xorq_rr(src.encoding(), dest.encoding());
  }
  void xorq(Imm32 imm, Register dest) {
    masm.xorq_ir(imm.value, dest.encoding());
  }
  void xorq(const Operand& src, Register dest) {
    switch (src.kind()) {
      case Operand::REG:
        masm.xorq_rr(src.reg(), dest.encoding());
        break;
      case Operand::MEM_REG_DISP:
        masm.xorq_mr(src.disp(), src.base(), dest.encoding());
        break;
      case Operand::MEM_SCALE:
        masm.xorq_mr(src.disp(), src.base(), src.index(), src.scale(),
                     dest.encoding());
        break;
      case Operand::MEM_ADDRESS32:
        masm.xorq_mr(src.address(), dest.encoding());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
  }
xorq(Register src,const Operand & dest)877 void xorq(Register src, const Operand& dest) {
878 switch (dest.kind()) {
879 case Operand::REG:
880 masm.xorq_rr(src.encoding(), dest.reg());
881 break;
882 case Operand::MEM_REG_DISP:
883 masm.xorq_rm(src.encoding(), dest.disp(), dest.base());
884 break;
885 case Operand::MEM_SCALE:
886 masm.xorq_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
887 dest.scale());
888 break;
889 default:
890 MOZ_CRASH("unexpected operand kind");
891 }
892 }
893
bsrq(const Register & src,const Register & dest)894 void bsrq(const Register& src, const Register& dest) {
895 masm.bsrq_rr(src.encoding(), dest.encoding());
896 }
bsfq(const Register & src,const Register & dest)897 void bsfq(const Register& src, const Register& dest) {
898 masm.bsfq_rr(src.encoding(), dest.encoding());
899 }
popcntq(const Register & src,const Register & dest)900 void popcntq(const Register& src, const Register& dest) {
901 masm.popcntq_rr(src.encoding(), dest.encoding());
902 }
903
imulq(Register src,Register dest)904 void imulq(Register src, Register dest) {
905 masm.imulq_rr(src.encoding(), dest.encoding());
906 }
imulq(const Operand & src,Register dest)907 void imulq(const Operand& src, Register dest) {
908 switch (src.kind()) {
909 case Operand::REG:
910 masm.imulq_rr(src.reg(), dest.encoding());
911 break;
912 case Operand::MEM_REG_DISP:
913 masm.imulq_mr(src.disp(), src.base(), dest.encoding());
914 break;
915 case Operand::MEM_ADDRESS32:
916 MOZ_CRASH("NYI");
917 break;
918 default:
919 MOZ_CRASH("unexpected operand kind");
920 }
921 }
922
cqo()923 void cqo() { masm.cqo(); }
idivq(Register divisor)924 void idivq(Register divisor) { masm.idivq_r(divisor.encoding()); }
udivq(Register divisor)925 void udivq(Register divisor) { masm.divq_r(divisor.encoding()); }
926
vcvtsi2sdq(Register src,FloatRegister dest)927 void vcvtsi2sdq(Register src, FloatRegister dest) {
928 masm.vcvtsi2sdq_rr(src.encoding(), dest.encoding());
929 }
930
negq(Register reg)931 void negq(Register reg) { masm.negq_r(reg.encoding()); }
932
mov(ImmWord word,Register dest)933 void mov(ImmWord word, Register dest) {
934 // Use xor for setting registers to zero, as it is specially optimized
935 // for this purpose on modern hardware. Note that it does clobber FLAGS
936 // though. Use xorl instead of xorq since they are functionally
937 // equivalent (32-bit instructions zero-extend their results to 64 bits)
938 // and xorl has a smaller encoding.
939 if (word.value == 0)
940 xorl(dest, dest);
941 else
942 movq(word, dest);
943 }
mov(ImmPtr imm,Register dest)944 void mov(ImmPtr imm, Register dest) { movq(imm, dest); }
mov(wasm::SymbolicAddress imm,Register dest)945 void mov(wasm::SymbolicAddress imm, Register dest) {
946 masm.movq_i64r(-1, dest.encoding());
947 append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
948 }
mov(const Operand & src,Register dest)949 void mov(const Operand& src, Register dest) { movq(src, dest); }
mov(Register src,const Operand & dest)950 void mov(Register src, const Operand& dest) { movq(src, dest); }
mov(Imm32 imm32,const Operand & dest)951 void mov(Imm32 imm32, const Operand& dest) { movq(imm32, dest); }
mov(Register src,Register dest)952 void mov(Register src, Register dest) { movq(src, dest); }
mov(CodeLabel * label,Register dest)953 void mov(CodeLabel* label, Register dest) {
954 masm.movq_i64r(/* placeholder */ 0, dest.encoding());
955 label->patchAt()->bind(masm.size());
956 }
xchg(Register src,Register dest)957 void xchg(Register src, Register dest) { xchgq(src, dest); }
958
lea(const Operand & src,Register dest)959 void lea(const Operand& src, Register dest) {
960 switch (src.kind()) {
961 case Operand::MEM_REG_DISP:
962 masm.leaq_mr(src.disp(), src.base(), dest.encoding());
963 break;
964 case Operand::MEM_SCALE:
965 masm.leaq_mr(src.disp(), src.base(), src.index(), src.scale(),
966 dest.encoding());
967 break;
968 default:
969 MOZ_CRASH("unexepcted operand kind");
970 }
971 }
972
loadRipRelativeInt32(Register dest)973 CodeOffset loadRipRelativeInt32(Register dest) {
974 return CodeOffset(masm.movl_ripr(dest.encoding()).offset());
975 }
loadRipRelativeInt64(Register dest)976 CodeOffset loadRipRelativeInt64(Register dest) {
977 return CodeOffset(masm.movq_ripr(dest.encoding()).offset());
978 }
loadRipRelativeDouble(FloatRegister dest)979 CodeOffset loadRipRelativeDouble(FloatRegister dest) {
980 return CodeOffset(masm.vmovsd_ripr(dest.encoding()).offset());
981 }
loadRipRelativeFloat32(FloatRegister dest)982 CodeOffset loadRipRelativeFloat32(FloatRegister dest) {
983 return CodeOffset(masm.vmovss_ripr(dest.encoding()).offset());
984 }
loadRipRelativeInt32x4(FloatRegister dest)985 CodeOffset loadRipRelativeInt32x4(FloatRegister dest) {
986 return CodeOffset(masm.vmovdqa_ripr(dest.encoding()).offset());
987 }
loadRipRelativeFloat32x4(FloatRegister dest)988 CodeOffset loadRipRelativeFloat32x4(FloatRegister dest) {
989 return CodeOffset(masm.vmovaps_ripr(dest.encoding()).offset());
990 }
storeRipRelativeInt32(Register dest)991 CodeOffset storeRipRelativeInt32(Register dest) {
992 return CodeOffset(masm.movl_rrip(dest.encoding()).offset());
993 }
storeRipRelativeInt64(Register dest)994 CodeOffset storeRipRelativeInt64(Register dest) {
995 return CodeOffset(masm.movq_rrip(dest.encoding()).offset());
996 }
storeRipRelativeDouble(FloatRegister dest)997 CodeOffset storeRipRelativeDouble(FloatRegister dest) {
998 return CodeOffset(masm.vmovsd_rrip(dest.encoding()).offset());
999 }
storeRipRelativeFloat32(FloatRegister dest)1000 CodeOffset storeRipRelativeFloat32(FloatRegister dest) {
1001 return CodeOffset(masm.vmovss_rrip(dest.encoding()).offset());
1002 }
storeRipRelativeInt32x4(FloatRegister dest)1003 CodeOffset storeRipRelativeInt32x4(FloatRegister dest) {
1004 return CodeOffset(masm.vmovdqa_rrip(dest.encoding()).offset());
1005 }
storeRipRelativeFloat32x4(FloatRegister dest)1006 CodeOffset storeRipRelativeFloat32x4(FloatRegister dest) {
1007 return CodeOffset(masm.vmovaps_rrip(dest.encoding()).offset());
1008 }
leaRipRelative(Register dest)1009 CodeOffset leaRipRelative(Register dest) {
1010 return CodeOffset(masm.leaq_rip(dest.encoding()).offset());
1011 }
1012
cmpq(Register rhs,Register lhs)1013 void cmpq(Register rhs, Register lhs) {
1014 masm.cmpq_rr(rhs.encoding(), lhs.encoding());
1015 }
cmpq(Register rhs,const Operand & lhs)1016 void cmpq(Register rhs, const Operand& lhs) {
1017 switch (lhs.kind()) {
1018 case Operand::REG:
1019 masm.cmpq_rr(rhs.encoding(), lhs.reg());
1020 break;
1021 case Operand::MEM_REG_DISP:
1022 masm.cmpq_rm(rhs.encoding(), lhs.disp(), lhs.base());
1023 break;
1024 case Operand::MEM_ADDRESS32:
1025 masm.cmpq_rm(rhs.encoding(), lhs.address());
1026 break;
1027 default:
1028 MOZ_CRASH("unexpected operand kind");
1029 }
1030 }
cmpq(Imm32 rhs,Register lhs)1031 void cmpq(Imm32 rhs, Register lhs) {
1032 masm.cmpq_ir(rhs.value, lhs.encoding());
1033 }
cmpq(Imm32 rhs,const Operand & lhs)1034 void cmpq(Imm32 rhs, const Operand& lhs) {
1035 switch (lhs.kind()) {
1036 case Operand::REG:
1037 masm.cmpq_ir(rhs.value, lhs.reg());
1038 break;
1039 case Operand::MEM_REG_DISP:
1040 masm.cmpq_im(rhs.value, lhs.disp(), lhs.base());
1041 break;
1042 case Operand::MEM_SCALE:
1043 masm.cmpq_im(rhs.value, lhs.disp(), lhs.base(), lhs.index(),
1044 lhs.scale());
1045 break;
1046 case Operand::MEM_ADDRESS32:
1047 masm.cmpq_im(rhs.value, lhs.address());
1048 break;
1049 default:
1050 MOZ_CRASH("unexpected operand kind");
1051 }
1052 }
cmpq(const Operand & rhs,Register lhs)1053 void cmpq(const Operand& rhs, Register lhs) {
1054 switch (rhs.kind()) {
1055 case Operand::REG:
1056 masm.cmpq_rr(rhs.reg(), lhs.encoding());
1057 break;
1058 case Operand::MEM_REG_DISP:
1059 masm.cmpq_mr(rhs.disp(), rhs.base(), lhs.encoding());
1060 break;
1061 default:
1062 MOZ_CRASH("unexpected operand kind");
1063 }
1064 }
1065
testq(Imm32 rhs,Register lhs)1066 void testq(Imm32 rhs, Register lhs) {
1067 masm.testq_ir(rhs.value, lhs.encoding());
1068 }
testq(Register rhs,Register lhs)1069 void testq(Register rhs, Register lhs) {
1070 masm.testq_rr(rhs.encoding(), lhs.encoding());
1071 }
testq(Imm32 rhs,const Operand & lhs)1072 void testq(Imm32 rhs, const Operand& lhs) {
1073 switch (lhs.kind()) {
1074 case Operand::REG:
1075 masm.testq_ir(rhs.value, lhs.reg());
1076 break;
1077 case Operand::MEM_REG_DISP:
1078 masm.testq_i32m(rhs.value, lhs.disp(), lhs.base());
1079 break;
1080 default:
1081 MOZ_CRASH("unexpected operand kind");
1082 break;
1083 }
1084 }
1085
1086 void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
1087 JmpSrc src = masm.jmp();
1088 addPendingJump(src, target, reloc);
1089 }
1090 void j(Condition cond, ImmPtr target,
1091 Relocation::Kind reloc = Relocation::HARDCODED) {
1092 JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
1093 addPendingJump(src, target, reloc);
1094 }
1095
jmp(JitCode * target)1096 void jmp(JitCode* target) { jmp(ImmPtr(target->raw()), Relocation::JITCODE); }
j(Condition cond,JitCode * target)1097 void j(Condition cond, JitCode* target) {
1098 j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
1099 }
call(JitCode * target)1100 void call(JitCode* target) {
1101 JmpSrc src = masm.call();
1102 addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
1103 }
call(ImmWord target)1104 void call(ImmWord target) { call(ImmPtr((void*)target.value)); }
call(ImmPtr target)1105 void call(ImmPtr target) {
1106 JmpSrc src = masm.call();
1107 addPendingJump(src, target, Relocation::HARDCODED);
1108 }
1109
1110 // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
1111 // this instruction.
toggledCall(JitCode * target,bool enabled)1112 CodeOffset toggledCall(JitCode* target, bool enabled) {
1113 CodeOffset offset(size());
1114 JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
1115 addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
1116 MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
1117 return offset;
1118 }
1119
ToggledCallSize(uint8_t * code)1120 static size_t ToggledCallSize(uint8_t* code) {
1121 // Size of a call instruction.
1122 return 5;
1123 }
1124
1125 // Do not mask shared implementations.
1126 using AssemblerX86Shared::call;
1127
vcvttsd2sq(FloatRegister src,Register dest)1128 void vcvttsd2sq(FloatRegister src, Register dest) {
1129 masm.vcvttsd2sq_rr(src.encoding(), dest.encoding());
1130 }
vcvttss2sq(FloatRegister src,Register dest)1131 void vcvttss2sq(FloatRegister src, Register dest) {
1132 masm.vcvttss2sq_rr(src.encoding(), dest.encoding());
1133 }
vcvtsq2sd(Register src1,FloatRegister src0,FloatRegister dest)1134 void vcvtsq2sd(Register src1, FloatRegister src0, FloatRegister dest) {
1135 masm.vcvtsq2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
1136 }
vcvtsq2ss(Register src1,FloatRegister src0,FloatRegister dest)1137 void vcvtsq2ss(Register src1, FloatRegister src0, FloatRegister dest) {
1138 masm.vcvtsq2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
1139 }
1140 };
1141
1142 static inline void PatchJump(CodeLocationJump jump, CodeLocationLabel label,
1143 ReprotectCode reprotect = DontReprotect) {
1144 if (X86Encoding::CanRelinkJump(jump.raw(), label.raw())) {
1145 MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
1146 X86Encoding::SetRel32(jump.raw(), label.raw());
1147 } else {
1148 {
1149 MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
1150 X86Encoding::SetRel32(jump.raw(), jump.jumpTableEntry());
1151 }
1152 Assembler::PatchJumpEntry(jump.jumpTableEntry(), label.raw(), reprotect);
1153 }
1154 }
1155
PatchBackedge(CodeLocationJump & jump_,CodeLocationLabel label,JitZoneGroup::BackedgeTarget target)1156 static inline void PatchBackedge(CodeLocationJump& jump_,
1157 CodeLocationLabel label,
1158 JitZoneGroup::BackedgeTarget target) {
1159 PatchJump(jump_, label);
1160 }
1161
GetIntArgReg(uint32_t intArg,uint32_t floatArg,Register * out)1162 static inline bool GetIntArgReg(uint32_t intArg, uint32_t floatArg,
1163 Register* out) {
1164 #if defined(_WIN64)
1165 uint32_t arg = intArg + floatArg;
1166 #else
1167 uint32_t arg = intArg;
1168 #endif
1169 if (arg >= NumIntArgRegs) return false;
1170 *out = IntArgRegs[arg];
1171 return true;
1172 }
1173
1174 // Get a register in which we plan to put a quantity that will be used as an
1175 // integer argument. This differs from GetIntArgReg in that if we have no more
1176 // actual argument registers to use we will fall back on using whatever
1177 // CallTempReg* don't overlap the argument registers, and only fail once those
1178 // run out too.
GetTempRegForIntArg(uint32_t usedIntArgs,uint32_t usedFloatArgs,Register * out)1179 static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
1180 uint32_t usedFloatArgs, Register* out) {
1181 if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) return true;
1182 // Unfortunately, we have to assume things about the point at which
1183 // GetIntArgReg returns false, because we need to know how many registers it
1184 // can allocate.
1185 #if defined(_WIN64)
1186 uint32_t arg = usedIntArgs + usedFloatArgs;
1187 #else
1188 uint32_t arg = usedIntArgs;
1189 #endif
1190 arg -= NumIntArgRegs;
1191 if (arg >= NumCallTempNonArgRegs) return false;
1192 *out = CallTempNonArgRegs[arg];
1193 return true;
1194 }
1195
GetFloatArgReg(uint32_t intArg,uint32_t floatArg,FloatRegister * out)1196 static inline bool GetFloatArgReg(uint32_t intArg, uint32_t floatArg,
1197 FloatRegister* out) {
1198 #if defined(_WIN64)
1199 uint32_t arg = intArg + floatArg;
1200 #else
1201 uint32_t arg = floatArg;
1202 #endif
1203 if (floatArg >= NumFloatArgRegs) return false;
1204 *out = FloatArgRegs[arg];
1205 return true;
1206 }
1207
1208 } // namespace jit
1209 } // namespace js
1210
1211 #endif /* jit_x64_Assembler_x64_h */
1212