1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #ifndef jit_x86_Assembler_x86_h
8 #define jit_x86_Assembler_x86_h
9
10 #include "mozilla/ArrayUtils.h"
11
12 #include "jit/CompactBuffer.h"
13 #include "jit/IonCode.h"
14 #include "jit/JitCompartment.h"
15 #include "jit/shared/Assembler-shared.h"
16 #include "jit/x86-shared/Constants-x86-shared.h"
17
18 namespace js {
19 namespace jit {
20
// The eight x86 general-purpose registers. The encodings are shared with
// x64 (X86Encoding::rax..rdi name the common low register indices).
static MOZ_CONSTEXPR_VAR Register eax = { X86Encoding::rax };
static MOZ_CONSTEXPR_VAR Register ecx = { X86Encoding::rcx };
static MOZ_CONSTEXPR_VAR Register edx = { X86Encoding::rdx };
static MOZ_CONSTEXPR_VAR Register ebx = { X86Encoding::rbx };
static MOZ_CONSTEXPR_VAR Register esp = { X86Encoding::rsp };
static MOZ_CONSTEXPR_VAR Register ebp = { X86Encoding::rbp };
static MOZ_CONSTEXPR_VAR Register esi = { X86Encoding::rsi };
static MOZ_CONSTEXPR_VAR Register edi = { X86Encoding::rdi };
29
// The eight SSE registers available on x86. The default content type is
// Double; width-specific views (Single/Simd128) are constructed where needed
// (see ReturnFloat32Reg etc. below).
static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
38
static MOZ_CONSTEXPR_VAR Register InvalidReg = { X86Encoding::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = FloatRegister();

// Boxed-Value return pair: the type tag travels in ecx, the payload in edx.
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = ecx;
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = edx;
static MOZ_CONSTEXPR_VAR Register StackPointer = esp;
static MOZ_CONSTEXPR_VAR Register FramePointer = ebp;
static MOZ_CONSTEXPR_VAR Register ReturnReg = eax;
// xmm0 serves as the FP/SIMD return register and xmm7 as the scratch
// register; each is viewed as Single, Double, or Simd128 depending on the
// value being handled.
static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
static MOZ_CONSTEXPR_VAR FloatRegister ReturnDoubleReg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister ReturnSimd128Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloat32Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Single);
static MOZ_CONSTEXPR_VAR FloatRegister ScratchDoubleReg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimd128Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Simd128);
53
// Avoid ebp, which is the FramePointer, which is unavailable in some modes.
static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = esi;
// Temporaries for use around calls. Note that these reuse the GPR set (there
// are only six allocatable registers once esp/ebp are excluded), so some of
// them alias other named registers above.
static MOZ_CONSTEXPR_VAR Register CallTempReg0 = edi;
static MOZ_CONSTEXPR_VAR Register CallTempReg1 = eax;
static MOZ_CONSTEXPR_VAR Register CallTempReg2 = ebx;
static MOZ_CONSTEXPR_VAR Register CallTempReg3 = ecx;
static MOZ_CONSTEXPR_VAR Register CallTempReg4 = esi;
static MOZ_CONSTEXPR_VAR Register CallTempReg5 = edx;
62
// We have no arg regs, so our NonArgRegs are just our CallTempReg* in the
// same order (CallTempReg0..CallTempReg5).
// Use "const" instead of MOZ_CONSTEXPR_VAR here to work around a bug
// of VS2015 Update 1. See bug 1229604.
static const Register CallTempNonArgRegs[] = { edi, eax, ebx, ecx, esi, edx };
static const uint32_t NumCallTempNonArgRegs =
    mozilla::ArrayLength(CallTempNonArgRegs);
69
// Assigns locations to native-ABI call arguments. On x86 there are no
// integer argument registers (see the comment above CallTempNonArgRegs), so
// arguments presumably all land on the stack — confirm in the next()
// definition, which lives in the .cpp file.
class ABIArgGenerator
{
    uint32_t stackOffset_;  // Stack bytes consumed by the arguments so far.
    ABIArg current_;        // Location handed out by the last next() call.

  public:
    ABIArgGenerator();
    // Compute and return the location of the next argument of 'argType'.
    ABIArg next(MIRType argType);
    ABIArg& current() { return current_; }
    uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }

    // Note: these registers are all guaranteed to be different
    static const Register NonArgReturnReg0;
    static const Register NonArgReturnReg1;
    static const Register NonVolatileReg;
    static const Register NonArg_VolatileReg;
    static const Register NonReturn_VolatileReg0;
};
88
// Note: OsrFrameReg and PreBarrierReg both use edx; they are presumably live
// in disjoint code paths (OSR entry vs. pre-barrier trampoline).
static MOZ_CONSTEXPR_VAR Register OsrFrameReg = edx;
static MOZ_CONSTEXPR_VAR Register PreBarrierReg = edx;

// Registers used in the GenerateFFIIonExit Enable Activation block.
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegCallee = ecx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE0 = edi;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE1 = eax;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE2 = ebx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE3 = edx;

// Registers used in the GenerateFFIIonExit Disable Activation block.
// Note that the return registers (edx:ecx for a Value) overlap
// JSReturnReg_Data/JSReturnReg_Type above.
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnData = edx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnType = ecx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = edi;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = eax;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = esi;
// GCC stack is aligned on 16 bytes. Ion does not maintain this for internal
// calls. asm.js code does.
#if defined(__GNUC__)
static MOZ_CONSTEXPR_VAR uint32_t ABIStackAlignment = 16;
#else
static MOZ_CONSTEXPR_VAR uint32_t ABIStackAlignment = 4;
#endif
static MOZ_CONSTEXPR_VAR uint32_t CodeAlignment = 16;
static MOZ_CONSTEXPR_VAR uint32_t JitStackAlignment = 16;

// How many Values fit in one JIT stack alignment unit (16 / 8 == 2 on x86,
// where a Value is 8 bytes).
static MOZ_CONSTEXPR_VAR uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
  "Stack alignment should be a non-zero multiple of sizeof(Value)");

// This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is
// here such that it is accessible from the entire codebase. Once full support
// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
static MOZ_CONSTEXPR_VAR bool SupportsSimd = true;
static MOZ_CONSTEXPR_VAR uint32_t SimdMemoryAlignment = 16;

static_assert(CodeAlignment % SimdMemoryAlignment == 0,
  "Code alignment should be larger than any of the alignments which are used for "
  "the constant sections of the code buffer. Thus it should be larger than the "
  "alignment for SIMD constants.");

static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
  "Stack alignment should be larger than any of the alignments which are used for "
  "spilled values. Thus it should be larger than the alignment for SIMD accesses.");

static const uint32_t AsmJSStackAlignment = SimdMemoryAlignment;
137
// A JSValueTag carried as a 32-bit immediate: on x86 the tag word of a
// js::Value is a full 32-bit quantity.
struct ImmTag : public Imm32
{
    ImmTag(JSValueTag mask)
      : Imm32(int32_t(mask))
    { }
};

// Convenience wrapper: builds the ImmTag corresponding to a JSValueType.
struct ImmType : public ImmTag
{
    ImmType(JSValueType type)
      : ImmTag(JSVAL_TYPE_TO_TAG(type))
    { }
};

// Scale for pointer-sized elements: sizeof(void*) == 4 on x86.
static const Scale ScalePointer = TimesFour;
153
154 } // namespace jit
155 } // namespace js
156
157 #include "jit/x86-shared/Assembler-x86-shared.h"
158
159 namespace js {
160 namespace jit {
161
// Repoint an already-emitted rel32 jump at 'label'. 'jump' addresses the end
// of the jump instruction (just past the 32-bit displacement). If 'reprotect'
// is requested, the code memory is made writable for the duration of the
// patch.
static inline void
PatchJump(CodeLocationJump jump, CodeLocationLabel label, ReprotectCode reprotect = DontReprotect)
{
#ifdef DEBUG
    // Assert that we're overwriting a jump instruction, either:
    //   0F 80+cc <imm32>, or
    //   E9 <imm32>
    unsigned char* x = (unsigned char*)jump.raw() - 5;
    MOZ_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
               (*x == 0xE9));
#endif
    // An 8-byte window ending at jump.raw() covers the longest accepted form
    // (0F 8x + imm32, 6 bytes) with room to spare.
    MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
    X86Encoding::SetRel32(jump.raw(), label.raw());
}
static inline void
PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
{
    // On x86 a loop backedge is patched like any other jump; the requested
    // target kind carries no extra information here, so 'target' is unused.
    PatchJump(jump_, label);
}

// Return operand from a JS -> JS call.
static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
184
185 class Assembler : public AssemblerX86Shared
186 {
    // Record 'src' in the jump-relocation table so the target JitCode can be
    // traced (and the jump re-patched) by the GC.
    void writeRelocation(JmpSrc src) {
        jumpRelocations_.writeUnsigned(src.offset());
    }
    // Queue a jump whose absolute target is resolved later, in
    // executableCopy(). JITCODE-kind jumps additionally get a relocation
    // entry.
    void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind kind) {
        enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, kind));
        if (kind == Relocation::JITCODE)
            writeRelocation(src);
    }
195
  public:
    // Pull in the shared-assembler overloads so the x86-specific overloads
    // below extend rather than hide them.
    using AssemblerX86Shared::movl;
    using AssemblerX86Shared::j;
    using AssemblerX86Shared::jmp;
    using AssemblerX86Shared::vmovsd;
    using AssemblerX86Shared::vmovss;
    using AssemblerX86Shared::retarget;
    using AssemblerX86Shared::cmpl;
    using AssemblerX86Shared::call;
    using AssemblerX86Shared::push;
    using AssemblerX86Shared::pop;

    // Trace the JitCode pointers reachable through the jump-relocation table.
    static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);

    // Copy the assembly code to the given buffer, and perform any pending
    // relocations relying on the target address.
    void executableCopy(uint8_t* buffer);
213
    // Actual assembly emitting functions.

    void push(ImmGCPtr ptr) {
        masm.push_i32(int32_t(ptr.value));
        // GC pointers baked into code need a data relocation so the GC can
        // find and update them.
        writeDataRelocation(ptr);
    }
    void push(const ImmWord imm) {
        // A machine word is 32 bits on x86, so an ImmWord fits in an Imm32.
        push(Imm32(imm.value));
    }
    void push(const ImmPtr imm) {
        push(ImmWord(uintptr_t(imm.value)));
    }
    void push(FloatRegister src) {
        // There is no push encoding for XMM registers: make room on the
        // stack, then store the double.
        subl(Imm32(sizeof(double)), StackPointer);
        vmovsd(src, Address(StackPointer, 0));
    }

    // Push a 32-bit immediate that will be patched later; the returned
    // offset points just past the 4-byte immediate.
    CodeOffset pushWithPatch(ImmWord word) {
        masm.push_i32(int32_t(word.value));
        return CodeOffset(masm.currentOffset());
    }
235
pop(FloatRegister src)236 void pop(FloatRegister src) {
237 vmovsd(Address(StackPointer, 0), src);
238 addl(Imm32(sizeof(double)), StackPointer);
239 }
240
    // Move a 32-bit immediate into 'dest' where the immediate can be patched
    // later; the returned offset points just past the 4-byte immediate.
    CodeOffset movWithPatch(ImmWord word, Register dest) {
        movl(Imm32(word.value), dest);
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset movWithPatch(ImmPtr imm, Register dest) {
        return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
    }
248
    // Load immediates. The ImmGCPtr overloads write a data relocation after
    // every emitted immediate so the GC can trace/update the pointer.
    void movl(ImmGCPtr ptr, Register dest) {
        masm.movl_i32r(uintptr_t(ptr.value), dest.encoding());
        writeDataRelocation(ptr);
    }
    void movl(ImmGCPtr ptr, const Operand& dest) {
        switch (dest.kind()) {
          case Operand::REG:
            masm.movl_i32r(uintptr_t(ptr.value), dest.reg());
            writeDataRelocation(ptr);
            break;
          case Operand::MEM_REG_DISP:
            masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base());
            writeDataRelocation(ptr);
            break;
          case Operand::MEM_SCALE:
            masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base(), dest.index(), dest.scale());
            writeDataRelocation(ptr);
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
    }
    void movl(ImmWord imm, Register dest) {
        masm.movl_i32r(imm.value, dest.encoding());
    }
    void movl(ImmPtr imm, Register dest) {
        movl(ImmWord(uintptr_t(imm.value)), dest);
    }
mov(ImmWord imm,Register dest)277 void mov(ImmWord imm, Register dest) {
278 // Use xor for setting registers to zero, as it is specially optimized
279 // for this purpose on modern hardware. Note that it does clobber FLAGS
280 // though.
281 if (imm.value == 0)
282 xorl(dest, dest);
283 else
284 movl(imm, dest);
285 }
mov(ImmPtr imm,Register dest)286 void mov(ImmPtr imm, Register dest) {
287 mov(ImmWord(uintptr_t(imm.value)), dest);
288 }
mov(wasm::SymbolicAddress imm,Register dest)289 void mov(wasm::SymbolicAddress imm, Register dest) {
290 masm.movl_i32r(-1, dest.encoding());
291 append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm));
292 }
mov(const Operand & src,Register dest)293 void mov(const Operand& src, Register dest) {
294 movl(src, dest);
295 }
mov(Register src,const Operand & dest)296 void mov(Register src, const Operand& dest) {
297 movl(src, dest);
298 }
mov(Imm32 imm,const Operand & dest)299 void mov(Imm32 imm, const Operand& dest) {
300 movl(imm, dest);
301 }
mov(CodeOffset * label,Register dest)302 void mov(CodeOffset* label, Register dest) {
303 // Put a placeholder value in the instruction stream.
304 masm.movl_i32r(0, dest.encoding());
305 label->bind(masm.size());
306 }
mov(Register src,Register dest)307 void mov(Register src, Register dest) {
308 movl(src, dest);
309 }
xchg(Register src,Register dest)310 void xchg(Register src, Register dest) {
311 xchgl(src, dest);
312 }
lea(const Operand & src,Register dest)313 void lea(const Operand& src, Register dest) {
314 return leal(src, dest);
315 }
316
    // x87 float load/store (32-bit, from/to memory). NOTE(review): presumably
    // used to shuttle the ABI float return value through the x87 stack on
    // x86 — confirm at call sites.
    void fld32(const Operand& dest) {
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.fld32_m(dest.disp(), dest.base());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
    }

    void fstp32(const Operand& src) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.fstp32_m(src.disp(), src.base());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
    }
336
    void cmpl(ImmWord rhs, Register lhs) {
        masm.cmpl_ir(rhs.value, lhs.encoding());
    }
    void cmpl(ImmPtr rhs, Register lhs) {
        cmpl(ImmWord(uintptr_t(rhs.value)), lhs);
    }
    void cmpl(ImmGCPtr rhs, Register lhs) {
        masm.cmpl_i32r(uintptr_t(rhs.value), lhs.encoding());
        // GC-pointer immediates always get a data relocation.
        writeDataRelocation(rhs);
    }
    void cmpl(Register rhs, Register lhs) {
        masm.cmpl_rr(rhs.encoding(), lhs.encoding());
    }
    void cmpl(ImmGCPtr rhs, const Operand& lhs) {
        switch (lhs.kind()) {
          case Operand::REG:
            masm.cmpl_i32r(uintptr_t(rhs.value), lhs.reg());
            writeDataRelocation(rhs);
            break;
          case Operand::MEM_REG_DISP:
            masm.cmpl_i32m(uintptr_t(rhs.value), lhs.disp(), lhs.base());
            writeDataRelocation(rhs);
            break;
          case Operand::MEM_ADDRESS32:
            masm.cmpl_i32m(uintptr_t(rhs.value), lhs.address());
            writeDataRelocation(rhs);
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
    }
    // Compare against memory named by a symbolic address: a -1 placeholder
    // address is emitted and linked when the module is finalized.
    void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
        masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
        // The patchable disp32 ends exactly at the current offset.
        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), lhs));
    }
    void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
        // Unlike above, the instruction ends with the immediate, so the
        // disp32 position is reported by the assembler buffer (src.offset())
        // rather than by currentOffset().
        JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
        append(AsmJSAbsoluteLink(CodeOffset(src.offset()), lhs));
    }
376
    // Add-with-carry: adds imm/src plus the carry flag into dest.
    void adcl(Imm32 imm, Register dest) {
        masm.adcl_ir(imm.value, dest.encoding());
    }
    void adcl(Register src, Register dest) {
        masm.adcl_rr(src.encoding(), dest.encoding());
    }

    // Unsigned multiply: edx:eax <- eax * multiplier.
    void mull(Register multiplier) {
        masm.mull_r(multiplier.encoding());
    }

    // Double-precision shifts: shift 'dest' left/right by imm bits, filling
    // the vacated bits from 'src'.
    void shldl(const Imm32 imm, Register src, Register dest) {
        masm.shldl_irr(imm.value, src.encoding(), dest.encoding());
    }
    void shrdl(const Imm32 imm, Register src, Register dest) {
        masm.shrdl_irr(imm.value, src.encoding(), dest.encoding());
    }
394
    // Horizontal add of packed doubles (SSE3).
    void vhaddpd(FloatRegister src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE3());
        // All operands must be full 128-bit XMM views.
        MOZ_ASSERT(src.size() == 16);
        MOZ_ASSERT(dest.size() == 16);
        masm.vhaddpd_rr(src.encoding(), dest.encoding());
    }
    // Packed-double subtract: dest = src0 - src1 (src1 from memory).
    void vsubpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        MOZ_ASSERT(src0.size() == 16);
        MOZ_ASSERT(dest.size() == 16);
        switch (src1.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vsubpd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vsubpd_mr(src1.address(), src0.encoding(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
    }

    // Interleave the low doublewords of src0 and src1 into dest.
    void vpunpckldq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        MOZ_ASSERT(src0.size() == 16);
        MOZ_ASSERT(src1.size() == 16);
        MOZ_ASSERT(dest.size() == 16);
        masm.vpunpckldq_rr(src1.encoding(), src0.encoding(), dest.encoding());
    }
    void vpunpckldq(const Operand& src1, FloatRegister src0, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        MOZ_ASSERT(src0.size() == 16);
        MOZ_ASSERT(dest.size() == 16);
        switch (src1.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vpunpckldq_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vpunpckldq_mr(src1.address(), src0.encoding(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
    }
439
440 void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
441 JmpSrc src = masm.jmp();
442 addPendingJump(src, target, reloc);
443 }
444 void j(Condition cond, ImmPtr target,
445 Relocation::Kind reloc = Relocation::HARDCODED) {
446 JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
447 addPendingJump(src, target, reloc);
448 }
449
jmp(JitCode * target)450 void jmp(JitCode* target) {
451 jmp(ImmPtr(target->raw()), Relocation::JITCODE);
452 }
j(Condition cond,JitCode * target)453 void j(Condition cond, JitCode* target) {
454 j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
455 }
call(JitCode * target)456 void call(JitCode* target) {
457 JmpSrc src = masm.call();
458 addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
459 }
call(ImmWord target)460 void call(ImmWord target) {
461 call(ImmPtr((void*)target.value));
462 }
call(ImmPtr target)463 void call(ImmPtr target) {
464 JmpSrc src = masm.call();
465 addPendingJump(src, target, Relocation::HARDCODED);
466 }
467
    // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
    // this instruction.
    CodeOffset toggledCall(JitCode* target, bool enabled) {
        CodeOffset offset(size());
        // 'cmp eax, imm32' (3D id) and 'call rel32' (E8 cd) are both five
        // bytes, so the site can be toggled in place; the assert below
        // checks the emitted size.
        JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
        MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
        return offset;
    }

    // 'code' is unused on x86: the call size is position-independent.
    static size_t ToggledCallSize(uint8_t* code) {
        // Size of a call instruction.
        return 5;
    }
482
    // Re-routes pending jumps to an external target, flushing the label in the
    // process.
    void retarget(Label* label, ImmPtr target, Relocation::Kind reloc) {
        if (label->used()) {
            bool more;
            // Unbound labels thread a linked list of jump sites through the
            // code buffer; walk it via nextJump(), turning every site into a
            // pending absolute jump.
            X86Encoding::JmpSrc jmp(label->offset());
            do {
                X86Encoding::JmpSrc next;
                more = masm.nextJump(jmp, &next);
                addPendingJump(jmp, target, reloc);
                jmp = next;
            } while (more);
        }
        label->reset();
    }
498
    // Move a 32-bit immediate into a register where the immediate can be
    // patched. The returned offset points just past the 4-byte immediate.
    CodeOffset movlWithPatch(Imm32 imm, Register dest) {
        masm.movl_i32r(imm.value, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
505
    // Load from *(base + disp32) where disp32 can be patched. Each method
    // accepts either a base+displacement or an absolute-address operand and
    // returns the offset just past the patchable displacement.

    // Sign-extending byte load.
    CodeOffset movsblWithPatch(const Operand& src, Register dest) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.movsbl_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.movsbl_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Zero-extending byte load.
    CodeOffset movzblWithPatch(const Operand& src, Register dest) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.movzbl_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.movzbl_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Sign-extending 16-bit load.
    CodeOffset movswlWithPatch(const Operand& src, Register dest) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.movswl_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.movswl_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Zero-extending 16-bit load.
    CodeOffset movzwlWithPatch(const Operand& src, Register dest) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.movzwl_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.movzwl_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // 32-bit load.
    CodeOffset movlWithPatch(const Operand& src, Register dest) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.movl_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.movl_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Scalar float32 load.
    CodeOffset vmovssWithPatch(const Operand& src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovss_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // 32-bit integer load into an XMM register.
    CodeOffset vmovdWithPatch(const Operand& src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovd_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovd_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // 64-bit load into an XMM register.
    CodeOffset vmovqWithPatch(const Operand& src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovq_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovq_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Scalar double load.
    CodeOffset vmovsdWithPatch(const Operand& src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovsd_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Unaligned 128-bit float load.
    CodeOffset vmovupsWithPatch(const Operand& src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovups_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovups_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Unaligned 128-bit integer load.
    CodeOffset vmovdquWithPatch(const Operand& src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovdqu_mr_disp32(src.disp(), src.base(), dest.encoding());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovdqu_mr(src.address(), dest.encoding());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
656
    // Store to *(base + disp32) where disp32 can be patched. Mirrors the
    // load group above; each returns the offset just past the patchable
    // displacement.
    CodeOffset movbWithPatch(Register src, const Operand& dest) {
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.movb_rm_disp32(src.encoding(), dest.disp(), dest.base());
            break;
          case Operand::MEM_ADDRESS32:
            masm.movb_rm(src.encoding(), dest.address());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset movwWithPatch(Register src, const Operand& dest) {
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.movw_rm_disp32(src.encoding(), dest.disp(), dest.base());
            break;
          case Operand::MEM_ADDRESS32:
            masm.movw_rm(src.encoding(), dest.address());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset movlWithPatch(Register src, const Operand& dest) {
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.movl_rm_disp32(src.encoding(), dest.disp(), dest.base());
            break;
          case Operand::MEM_ADDRESS32:
            masm.movl_rm(src.encoding(), dest.address());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // 32-bit store of the low dword of an XMM register.
    CodeOffset vmovdWithPatch(FloatRegister src, const Operand& dest) {
        MOZ_ASSERT(HasSSE2());
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovd_rm_disp32(src.encoding(), dest.disp(), dest.base());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovd_rm(src.encoding(), dest.address());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // 64-bit store of the low qword of an XMM register.
    CodeOffset vmovqWithPatch(FloatRegister src, const Operand& dest) {
        MOZ_ASSERT(HasSSE2());
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovq_rm_disp32(src.encoding(), dest.disp(), dest.base());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovq_rm(src.encoding(), dest.address());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Scalar float32 store.
    CodeOffset vmovssWithPatch(FloatRegister src, const Operand& dest) {
        MOZ_ASSERT(HasSSE2());
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovss_rm(src.encoding(), dest.address());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Scalar double store.
    CodeOffset vmovsdWithPatch(FloatRegister src, const Operand& dest) {
        MOZ_ASSERT(HasSSE2());
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovsd_rm(src.encoding(), dest.address());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Unaligned 128-bit float store.
    CodeOffset vmovupsWithPatch(FloatRegister src, const Operand& dest) {
        MOZ_ASSERT(HasSSE2());
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovups_rm_disp32(src.encoding(), dest.disp(), dest.base());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovups_rm(src.encoding(), dest.address());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
    // Unaligned 128-bit integer store.
    CodeOffset vmovdquWithPatch(FloatRegister src, const Operand& dest) {
        MOZ_ASSERT(HasSSE2());
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.vmovdqu_rm_disp32(src.encoding(), dest.disp(), dest.base());
            break;
          case Operand::MEM_ADDRESS32:
            masm.vmovdqu_rm(src.encoding(), dest.address());
            break;
          default:
            MOZ_CRASH("unexpected operand kind");
        }
        return CodeOffset(masm.currentOffset());
    }
781
    // Load from *(addr + index*scale) where addr can be patched.
    CodeOffset movlWithPatch(PatchedAbsoluteAddress addr, Register index, Scale scale,
                             Register dest)
    {
        masm.movl_mr(addr.addr, index.encoding(), scale, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }

    // Load from *src where src can be patched. Each returns the offset just
    // past the patchable absolute address.
    CodeOffset movsblWithPatch(PatchedAbsoluteAddress src, Register dest) {
        masm.movsbl_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset movzblWithPatch(PatchedAbsoluteAddress src, Register dest) {
        masm.movzbl_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset movswlWithPatch(PatchedAbsoluteAddress src, Register dest) {
        masm.movswl_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset movzwlWithPatch(PatchedAbsoluteAddress src, Register dest) {
        masm.movzwl_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset movlWithPatch(PatchedAbsoluteAddress src, Register dest) {
        masm.movl_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset vmovssWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        masm.vmovss_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset vmovdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        masm.vmovd_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset vmovqWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        masm.vmovq_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    CodeOffset vmovsdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        masm.vmovsd_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    // Aligned 128-bit integer load.
    CodeOffset vmovdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        masm.vmovdqa_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    // Unaligned 128-bit integer load.
    CodeOffset vmovdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        masm.vmovdqu_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
    // Aligned 128-bit float load.
    CodeOffset vmovapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
        MOZ_ASSERT(HasSSE2());
        masm.vmovaps_mr(src.addr, dest.encoding());
        return CodeOffset(masm.currentOffset());
    }
vmovupsWithPatch(PatchedAbsoluteAddress src,FloatRegister dest)846 CodeOffset vmovupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
847 MOZ_ASSERT(HasSSE2());
848 masm.vmovups_mr(src.addr, dest.encoding());
849 return CodeOffset(masm.currentOffset());
850 }

    // Store to *dest where dest can be patched.
movbWithPatch(Register src,PatchedAbsoluteAddress dest)853 CodeOffset movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
854 masm.movb_rm(src.encoding(), dest.addr);
855 return CodeOffset(masm.currentOffset());
856 }
movwWithPatch(Register src,PatchedAbsoluteAddress dest)857 CodeOffset movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
858 masm.movw_rm(src.encoding(), dest.addr);
859 return CodeOffset(masm.currentOffset());
860 }
movlWithPatch(Register src,PatchedAbsoluteAddress dest)861 CodeOffset movlWithPatch(Register src, PatchedAbsoluteAddress dest) {
862 masm.movl_rm(src.encoding(), dest.addr);
863 return CodeOffset(masm.currentOffset());
864 }
vmovssWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)865 CodeOffset vmovssWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
866 MOZ_ASSERT(HasSSE2());
867 masm.vmovss_rm(src.encoding(), dest.addr);
868 return CodeOffset(masm.currentOffset());
869 }
vmovdWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)870 CodeOffset vmovdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
871 MOZ_ASSERT(HasSSE2());
872 masm.vmovd_rm(src.encoding(), dest.addr);
873 return CodeOffset(masm.currentOffset());
874 }
vmovqWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)875 CodeOffset vmovqWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
876 MOZ_ASSERT(HasSSE2());
877 masm.vmovq_rm(src.encoding(), dest.addr);
878 return CodeOffset(masm.currentOffset());
879 }
vmovsdWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)880 CodeOffset vmovsdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
881 MOZ_ASSERT(HasSSE2());
882 masm.vmovsd_rm(src.encoding(), dest.addr);
883 return CodeOffset(masm.currentOffset());
884 }
vmovdqaWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)885 CodeOffset vmovdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
886 MOZ_ASSERT(HasSSE2());
887 masm.vmovdqa_rm(src.encoding(), dest.addr);
888 return CodeOffset(masm.currentOffset());
889 }
vmovapsWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)890 CodeOffset vmovapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
891 MOZ_ASSERT(HasSSE2());
892 masm.vmovaps_rm(src.encoding(), dest.addr);
893 return CodeOffset(masm.currentOffset());
894 }
vmovdquWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)895 CodeOffset vmovdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
896 MOZ_ASSERT(HasSSE2());
897 masm.vmovdqu_rm(src.encoding(), dest.addr);
898 return CodeOffset(masm.currentOffset());
899 }
vmovupsWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)900 CodeOffset vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
901 MOZ_ASSERT(HasSSE2());
902 masm.vmovups_rm(src.encoding(), dest.addr);
903 return CodeOffset(masm.currentOffset());
904 }
loadAsmJSActivation(Register dest)906 void loadAsmJSActivation(Register dest) {
907 CodeOffset label = movlWithPatch(PatchedAbsoluteAddress(), dest);
908 append(AsmJSGlobalAccess(label, wasm::ActivationGlobalDataOffset));
909 }
    // No-op on this target: x86 doesn't have a pinned heap register, so there
    // is nothing to reload from global data (heap accesses are patched
    // directly instead).
    void loadAsmJSHeapRegisterFromGlobalData() {
        // x86 doesn't have a pinned heap register.
    }
canUseInSingleByteInstruction(Register reg)914 static bool canUseInSingleByteInstruction(Register reg) {
915 return X86Encoding::HasSubregL(reg.encoding());
916 }
917 };
919 // Get a register in which we plan to put a quantity that will be used as an
920 // integer argument. This differs from GetIntArgReg in that if we have no more
921 // actual argument registers to use we will fall back on using whatever
922 // CallTempReg* don't overlap the argument registers, and only fail once those
923 // run out too.
924 static inline bool
GetTempRegForIntArg(uint32_t usedIntArgs,uint32_t usedFloatArgs,Register * out)925 GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
926 {
927 if (usedIntArgs >= NumCallTempNonArgRegs)
928 return false;
929 *out = CallTempNonArgRegs[usedIntArgs];
930 return true;
931 }
933 } // namespace jit
934 } // namespace js
935
936 #endif /* jit_x86_Assembler_x86_h */
937