/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_x86_Assembler_x86_h
#define jit_x86_Assembler_x86_h

#include "mozilla/ArrayUtils.h"

#include "jit/CompactBuffer.h"
#include "jit/IonCode.h"
#include "jit/JitCompartment.h"
#include "jit/shared/Assembler-shared.h"
#include "jit/x86-shared/Constants-x86-shared.h"

namespace js {
namespace jit {
20
21 static constexpr Register eax = { X86Encoding::rax };
22 static constexpr Register ecx = { X86Encoding::rcx };
23 static constexpr Register edx = { X86Encoding::rdx };
24 static constexpr Register ebx = { X86Encoding::rbx };
25 static constexpr Register esp = { X86Encoding::rsp };
26 static constexpr Register ebp = { X86Encoding::rbp };
27 static constexpr Register esi = { X86Encoding::rsi };
28 static constexpr Register edi = { X86Encoding::rdi };
29
30 static constexpr FloatRegister xmm0 = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
31 static constexpr FloatRegister xmm1 = FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
32 static constexpr FloatRegister xmm2 = FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
33 static constexpr FloatRegister xmm3 = FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
34 static constexpr FloatRegister xmm4 = FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
35 static constexpr FloatRegister xmm5 = FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
36 static constexpr FloatRegister xmm6 = FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
37 static constexpr FloatRegister xmm7 = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
38
39 static constexpr Register InvalidReg = { X86Encoding::invalid_reg };
40 static constexpr FloatRegister InvalidFloatReg = FloatRegister();
41
42 static constexpr Register JSReturnReg_Type = ecx;
43 static constexpr Register JSReturnReg_Data = edx;
44 static constexpr Register StackPointer = esp;
45 static constexpr Register FramePointer = ebp;
46 static constexpr Register ReturnReg = eax;
47 static constexpr Register64 ReturnReg64(edi, eax);
48 static constexpr FloatRegister ReturnFloat32Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
49 static constexpr FloatRegister ReturnDoubleReg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
50 static constexpr FloatRegister ReturnSimd128Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
51 static constexpr FloatRegister ScratchFloat32Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Single);
52 static constexpr FloatRegister ScratchDoubleReg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
53 static constexpr FloatRegister ScratchSimd128Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Simd128);
54
55 // Avoid ebp, which is the FramePointer, which is unavailable in some modes.
56 static constexpr Register ArgumentsRectifierReg = esi;
57 static constexpr Register CallTempReg0 = edi;
58 static constexpr Register CallTempReg1 = eax;
59 static constexpr Register CallTempReg2 = ebx;
60 static constexpr Register CallTempReg3 = ecx;
61 static constexpr Register CallTempReg4 = esi;
62 static constexpr Register CallTempReg5 = edx;
63
64 // We have no arg regs, so our NonArgRegs are just our CallTempReg*
65 // Use "const" instead of constexpr here to work around a bug
66 // of VS2015 Update 1. See bug 1229604.
67 static const Register CallTempNonArgRegs[] = { edi, eax, ebx, ecx, esi, edx };
68 static const uint32_t NumCallTempNonArgRegs =
69 mozilla::ArrayLength(CallTempNonArgRegs);
70
71 class ABIArgGenerator
72 {
73 uint32_t stackOffset_;
74 ABIArg current_;
75
76 public:
77 ABIArgGenerator();
78 ABIArg next(MIRType argType);
current()79 ABIArg& current() { return current_; }
stackBytesConsumedSoFar()80 uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
81
82 };
83
84 static constexpr Register ABINonArgReg0 = eax;
85 static constexpr Register ABINonArgReg1 = ebx;
86 static constexpr Register ABINonArgReg2 = ecx;
87
88 // Note: these three registers are all guaranteed to be different
89 static constexpr Register ABINonArgReturnReg0 = ecx;
90 static constexpr Register ABINonArgReturnReg1 = edx;
91 static constexpr Register ABINonVolatileReg = ebx;
92
93 // TLS pointer argument register for WebAssembly functions. This must not alias
94 // any other register used for passing function arguments or return values.
95 // Preserved by WebAssembly functions.
96 static constexpr Register WasmTlsReg = esi;
97
98 // Registers used for asm.js/wasm table calls. These registers must be disjoint
99 // from the ABI argument registers, WasmTlsReg and each other.
100 static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
101 static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
102 static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
103
104 static constexpr Register OsrFrameReg = edx;
105 static constexpr Register PreBarrierReg = edx;
106
107 // Registers used in the GenerateFFIIonExit Enable Activation block.
108 static constexpr Register WasmIonExitRegCallee = ecx;
109 static constexpr Register WasmIonExitRegE0 = edi;
110 static constexpr Register WasmIonExitRegE1 = eax;
111
112 // Registers used in the GenerateFFIIonExit Disable Activation block.
113 static constexpr Register WasmIonExitRegReturnData = edx;
114 static constexpr Register WasmIonExitRegReturnType = ecx;
115 static constexpr Register WasmIonExitRegD0 = edi;
116 static constexpr Register WasmIonExitRegD1 = eax;
117 static constexpr Register WasmIonExitRegD2 = esi;
118
119 // Registerd used in RegExpMatcher instruction (do not use JSReturnOperand).
120 static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
121 static constexpr Register RegExpMatcherStringReg = CallTempReg1;
122 static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
123
124 // Registerd used in RegExpTester instruction (do not use ReturnReg).
125 static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
126 static constexpr Register RegExpTesterStringReg = CallTempReg2;
127 static constexpr Register RegExpTesterLastIndexReg = CallTempReg3;
128
129 // GCC stack is aligned on 16 bytes. Ion does not maintain this for internal
130 // calls. wasm code does.
131 #if defined(__GNUC__)
132 static constexpr uint32_t ABIStackAlignment = 16;
133 #else
134 static constexpr uint32_t ABIStackAlignment = 4;
135 #endif
136 static constexpr uint32_t CodeAlignment = 16;
137 static constexpr uint32_t JitStackAlignment = 16;
138
139 static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
140 static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
141 "Stack alignment should be a non-zero multiple of sizeof(Value)");
142
143 // This boolean indicates whether we support SIMD instructions flavoured for
144 // this architecture or not. Rather than a method in the LIRGenerator, it is
145 // here such that it is accessible from the entire codebase. Once full support
146 // for SIMD is reached on all tier-1 platforms, this constant can be deleted.
147 static constexpr bool SupportsSimd = true;
148 static constexpr uint32_t SimdMemoryAlignment = 16;
149
150 static_assert(CodeAlignment % SimdMemoryAlignment == 0,
151 "Code alignment should be larger than any of the alignments which are used for "
152 "the constant sections of the code buffer. Thus it should be larger than the "
153 "alignment for SIMD constants.");
154
155 static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
156 "Stack alignment should be larger than any of the alignments which are used for "
157 "spilled values. Thus it should be larger than the alignment for SIMD accesses.");
158
159 static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
160
161 struct ImmTag : public Imm32
162 {
ImmTagImmTag163 explicit ImmTag(JSValueTag mask)
164 : Imm32(int32_t(mask))
165 { }
166 };
167
168 struct ImmType : public ImmTag
169 {
ImmTypeImmType170 explicit ImmType(JSValueType type)
171 : ImmTag(JSVAL_TYPE_TO_TAG(type))
172 { }
173 };
174
175 static const Scale ScalePointer = TimesFour;
176
} // namespace jit
} // namespace js

#include "jit/x86-shared/Assembler-x86-shared.h"

namespace js {
namespace jit {
184
185 static inline void
186 PatchJump(CodeLocationJump jump, CodeLocationLabel label, ReprotectCode reprotect = DontReprotect)
187 {
188 #ifdef DEBUG
189 // Assert that we're overwriting a jump instruction, either:
190 // 0F 80+cc <imm32>, or
191 // E9 <imm32>
192 unsigned char* x = (unsigned char*)jump.raw() - 5;
193 MOZ_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
194 (*x == 0xE9));
195 #endif
196 MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
197 X86Encoding::SetRel32(jump.raw(), label.raw());
198 }
199 static inline void
PatchBackedge(CodeLocationJump & jump_,CodeLocationLabel label,JitRuntime::BackedgeTarget target)200 PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
201 {
202 PatchJump(jump_, label);
203 }
204
205 // Return operand from a JS -> JS call.
206 static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
207
208 class Assembler : public AssemblerX86Shared
209 {
writeRelocation(JmpSrc src)210 void writeRelocation(JmpSrc src) {
211 jumpRelocations_.writeUnsigned(src.offset());
212 }
addPendingJump(JmpSrc src,ImmPtr target,Relocation::Kind kind)213 void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind kind) {
214 enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, kind));
215 if (kind == Relocation::JITCODE)
216 writeRelocation(src);
217 }
218
219 public:
220 using AssemblerX86Shared::movl;
221 using AssemblerX86Shared::j;
222 using AssemblerX86Shared::jmp;
223 using AssemblerX86Shared::vmovsd;
224 using AssemblerX86Shared::vmovss;
225 using AssemblerX86Shared::retarget;
226 using AssemblerX86Shared::cmpl;
227 using AssemblerX86Shared::call;
228 using AssemblerX86Shared::push;
229 using AssemblerX86Shared::pop;
230
231 static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
232
233 // Copy the assembly code to the given buffer, and perform any pending
234 // relocations relying on the target address.
235 void executableCopy(uint8_t* buffer);
236
237 // Actual assembly emitting functions.
238
push(ImmGCPtr ptr)239 void push(ImmGCPtr ptr) {
240 masm.push_i32(int32_t(ptr.value));
241 writeDataRelocation(ptr);
242 }
push(const ImmWord imm)243 void push(const ImmWord imm) {
244 push(Imm32(imm.value));
245 }
push(const ImmPtr imm)246 void push(const ImmPtr imm) {
247 push(ImmWord(uintptr_t(imm.value)));
248 }
push(FloatRegister src)249 void push(FloatRegister src) {
250 subl(Imm32(sizeof(double)), StackPointer);
251 vmovsd(src, Address(StackPointer, 0));
252 }
253
pushWithPatch(ImmWord word)254 CodeOffset pushWithPatch(ImmWord word) {
255 masm.push_i32(int32_t(word.value));
256 return CodeOffset(masm.currentOffset());
257 }
258
pop(FloatRegister src)259 void pop(FloatRegister src) {
260 vmovsd(Address(StackPointer, 0), src);
261 addl(Imm32(sizeof(double)), StackPointer);
262 }
263
movWithPatch(ImmWord word,Register dest)264 CodeOffset movWithPatch(ImmWord word, Register dest) {
265 movl(Imm32(word.value), dest);
266 return CodeOffset(masm.currentOffset());
267 }
movWithPatch(ImmPtr imm,Register dest)268 CodeOffset movWithPatch(ImmPtr imm, Register dest) {
269 return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
270 }
271
movl(ImmGCPtr ptr,Register dest)272 void movl(ImmGCPtr ptr, Register dest) {
273 masm.movl_i32r(uintptr_t(ptr.value), dest.encoding());
274 writeDataRelocation(ptr);
275 }
movl(ImmGCPtr ptr,const Operand & dest)276 void movl(ImmGCPtr ptr, const Operand& dest) {
277 switch (dest.kind()) {
278 case Operand::REG:
279 masm.movl_i32r(uintptr_t(ptr.value), dest.reg());
280 writeDataRelocation(ptr);
281 break;
282 case Operand::MEM_REG_DISP:
283 masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base());
284 writeDataRelocation(ptr);
285 break;
286 case Operand::MEM_SCALE:
287 masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base(), dest.index(), dest.scale());
288 writeDataRelocation(ptr);
289 break;
290 default:
291 MOZ_CRASH("unexpected operand kind");
292 }
293 }
movl(ImmWord imm,Register dest)294 void movl(ImmWord imm, Register dest) {
295 masm.movl_i32r(imm.value, dest.encoding());
296 }
movl(ImmPtr imm,Register dest)297 void movl(ImmPtr imm, Register dest) {
298 movl(ImmWord(uintptr_t(imm.value)), dest);
299 }
mov(ImmWord imm,Register dest)300 void mov(ImmWord imm, Register dest) {
301 // Use xor for setting registers to zero, as it is specially optimized
302 // for this purpose on modern hardware. Note that it does clobber FLAGS
303 // though.
304 if (imm.value == 0)
305 xorl(dest, dest);
306 else
307 movl(imm, dest);
308 }
mov(ImmPtr imm,Register dest)309 void mov(ImmPtr imm, Register dest) {
310 mov(ImmWord(uintptr_t(imm.value)), dest);
311 }
mov(wasm::SymbolicAddress imm,Register dest)312 void mov(wasm::SymbolicAddress imm, Register dest) {
313 masm.movl_i32r(-1, dest.encoding());
314 append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
315 }
mov(const Operand & src,Register dest)316 void mov(const Operand& src, Register dest) {
317 movl(src, dest);
318 }
mov(Register src,const Operand & dest)319 void mov(Register src, const Operand& dest) {
320 movl(src, dest);
321 }
mov(Imm32 imm,const Operand & dest)322 void mov(Imm32 imm, const Operand& dest) {
323 movl(imm, dest);
324 }
mov(CodeOffset * label,Register dest)325 void mov(CodeOffset* label, Register dest) {
326 // Put a placeholder value in the instruction stream.
327 masm.movl_i32r(0, dest.encoding());
328 label->bind(masm.size());
329 }
mov(Register src,Register dest)330 void mov(Register src, Register dest) {
331 movl(src, dest);
332 }
xchg(Register src,Register dest)333 void xchg(Register src, Register dest) {
334 xchgl(src, dest);
335 }
lea(const Operand & src,Register dest)336 void lea(const Operand& src, Register dest) {
337 return leal(src, dest);
338 }
339
fstp32(const Operand & src)340 void fstp32(const Operand& src) {
341 switch (src.kind()) {
342 case Operand::MEM_REG_DISP:
343 masm.fstp32_m(src.disp(), src.base());
344 break;
345 default:
346 MOZ_CRASH("unexpected operand kind");
347 }
348 }
faddp()349 void faddp() {
350 masm.faddp();
351 }
352
cmpl(ImmWord rhs,Register lhs)353 void cmpl(ImmWord rhs, Register lhs) {
354 masm.cmpl_ir(rhs.value, lhs.encoding());
355 }
cmpl(ImmPtr rhs,Register lhs)356 void cmpl(ImmPtr rhs, Register lhs) {
357 cmpl(ImmWord(uintptr_t(rhs.value)), lhs);
358 }
cmpl(ImmGCPtr rhs,Register lhs)359 void cmpl(ImmGCPtr rhs, Register lhs) {
360 masm.cmpl_i32r(uintptr_t(rhs.value), lhs.encoding());
361 writeDataRelocation(rhs);
362 }
cmpl(Register rhs,Register lhs)363 void cmpl(Register rhs, Register lhs) {
364 masm.cmpl_rr(rhs.encoding(), lhs.encoding());
365 }
cmpl(ImmGCPtr rhs,const Operand & lhs)366 void cmpl(ImmGCPtr rhs, const Operand& lhs) {
367 switch (lhs.kind()) {
368 case Operand::REG:
369 masm.cmpl_i32r(uintptr_t(rhs.value), lhs.reg());
370 writeDataRelocation(rhs);
371 break;
372 case Operand::MEM_REG_DISP:
373 masm.cmpl_i32m(uintptr_t(rhs.value), lhs.disp(), lhs.base());
374 writeDataRelocation(rhs);
375 break;
376 case Operand::MEM_ADDRESS32:
377 masm.cmpl_i32m(uintptr_t(rhs.value), lhs.address());
378 writeDataRelocation(rhs);
379 break;
380 default:
381 MOZ_CRASH("unexpected operand kind");
382 }
383 }
cmpl(Register rhs,wasm::SymbolicAddress lhs)384 void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
385 masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
386 append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), lhs));
387 }
cmpl(Imm32 rhs,wasm::SymbolicAddress lhs)388 void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
389 JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
390 append(wasm::SymbolicAccess(CodeOffset(src.offset()), lhs));
391 }
392
adcl(Imm32 imm,Register dest)393 void adcl(Imm32 imm, Register dest) {
394 masm.adcl_ir(imm.value, dest.encoding());
395 }
adcl(Register src,Register dest)396 void adcl(Register src, Register dest) {
397 masm.adcl_rr(src.encoding(), dest.encoding());
398 }
399
sbbl(Imm32 imm,Register dest)400 void sbbl(Imm32 imm, Register dest) {
401 masm.sbbl_ir(imm.value, dest.encoding());
402 }
sbbl(Register src,Register dest)403 void sbbl(Register src, Register dest) {
404 masm.sbbl_rr(src.encoding(), dest.encoding());
405 }
406
mull(Register multiplier)407 void mull(Register multiplier) {
408 masm.mull_r(multiplier.encoding());
409 }
410
shldl(const Imm32 imm,Register src,Register dest)411 void shldl(const Imm32 imm, Register src, Register dest) {
412 masm.shldl_irr(imm.value, src.encoding(), dest.encoding());
413 }
shrdl(const Imm32 imm,Register src,Register dest)414 void shrdl(const Imm32 imm, Register src, Register dest) {
415 masm.shrdl_irr(imm.value, src.encoding(), dest.encoding());
416 }
417
vhaddpd(FloatRegister src,FloatRegister dest)418 void vhaddpd(FloatRegister src, FloatRegister dest) {
419 MOZ_ASSERT(HasSSE3());
420 MOZ_ASSERT(src.size() == 16);
421 MOZ_ASSERT(dest.size() == 16);
422 masm.vhaddpd_rr(src.encoding(), dest.encoding());
423 }
vsubpd(const Operand & src1,FloatRegister src0,FloatRegister dest)424 void vsubpd(const Operand& src1, FloatRegister src0, FloatRegister dest) {
425 MOZ_ASSERT(HasSSE2());
426 MOZ_ASSERT(src0.size() == 16);
427 MOZ_ASSERT(dest.size() == 16);
428 switch (src1.kind()) {
429 case Operand::MEM_REG_DISP:
430 masm.vsubpd_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
431 break;
432 case Operand::MEM_ADDRESS32:
433 masm.vsubpd_mr(src1.address(), src0.encoding(), dest.encoding());
434 break;
435 default:
436 MOZ_CRASH("unexpected operand kind");
437 }
438 }
439
vpunpckldq(FloatRegister src1,FloatRegister src0,FloatRegister dest)440 void vpunpckldq(FloatRegister src1, FloatRegister src0, FloatRegister dest) {
441 MOZ_ASSERT(HasSSE2());
442 MOZ_ASSERT(src0.size() == 16);
443 MOZ_ASSERT(src1.size() == 16);
444 MOZ_ASSERT(dest.size() == 16);
445 masm.vpunpckldq_rr(src1.encoding(), src0.encoding(), dest.encoding());
446 }
vpunpckldq(const Operand & src1,FloatRegister src0,FloatRegister dest)447 void vpunpckldq(const Operand& src1, FloatRegister src0, FloatRegister dest) {
448 MOZ_ASSERT(HasSSE2());
449 MOZ_ASSERT(src0.size() == 16);
450 MOZ_ASSERT(dest.size() == 16);
451 switch (src1.kind()) {
452 case Operand::MEM_REG_DISP:
453 masm.vpunpckldq_mr(src1.disp(), src1.base(), src0.encoding(), dest.encoding());
454 break;
455 case Operand::MEM_ADDRESS32:
456 masm.vpunpckldq_mr(src1.address(), src0.encoding(), dest.encoding());
457 break;
458 default:
459 MOZ_CRASH("unexpected operand kind");
460 }
461 }
462
fild(const Operand & src)463 void fild(const Operand& src) {
464 switch (src.kind()) {
465 case Operand::MEM_REG_DISP:
466 masm.fild_m(src.disp(), src.base());
467 break;
468 default:
469 MOZ_CRASH("unexpected operand kind");
470 }
471 }
472
473 void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
474 JmpSrc src = masm.jmp();
475 addPendingJump(src, target, reloc);
476 }
477 void j(Condition cond, ImmPtr target,
478 Relocation::Kind reloc = Relocation::HARDCODED) {
479 JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
480 addPendingJump(src, target, reloc);
481 }
482
jmp(JitCode * target)483 void jmp(JitCode* target) {
484 jmp(ImmPtr(target->raw()), Relocation::JITCODE);
485 }
j(Condition cond,JitCode * target)486 void j(Condition cond, JitCode* target) {
487 j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
488 }
call(JitCode * target)489 void call(JitCode* target) {
490 JmpSrc src = masm.call();
491 addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
492 }
call(ImmWord target)493 void call(ImmWord target) {
494 call(ImmPtr((void*)target.value));
495 }
call(ImmPtr target)496 void call(ImmPtr target) {
497 JmpSrc src = masm.call();
498 addPendingJump(src, target, Relocation::HARDCODED);
499 }
500
501 // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
502 // this instruction.
toggledCall(JitCode * target,bool enabled)503 CodeOffset toggledCall(JitCode* target, bool enabled) {
504 CodeOffset offset(size());
505 JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
506 addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
507 MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
508 return offset;
509 }
510
ToggledCallSize(uint8_t * code)511 static size_t ToggledCallSize(uint8_t* code) {
512 // Size of a call instruction.
513 return 5;
514 }
515
516 // Re-routes pending jumps to an external target, flushing the label in the
517 // process.
retarget(Label * label,ImmPtr target,Relocation::Kind reloc)518 void retarget(Label* label, ImmPtr target, Relocation::Kind reloc) {
519 if (label->used()) {
520 bool more;
521 X86Encoding::JmpSrc jmp(label->offset());
522 do {
523 X86Encoding::JmpSrc next;
524 more = masm.nextJump(jmp, &next);
525 addPendingJump(jmp, target, reloc);
526 jmp = next;
527 } while (more);
528 }
529 label->reset();
530 }
531
532 // Move a 32-bit immediate into a register where the immediate can be
533 // patched.
movlWithPatch(Imm32 imm,Register dest)534 CodeOffset movlWithPatch(Imm32 imm, Register dest) {
535 masm.movl_i32r(imm.value, dest.encoding());
536 return CodeOffset(masm.currentOffset());
537 }
538
539 // Load from *(base + disp32) where disp32 can be patched.
movsblWithPatch(const Operand & src,Register dest)540 CodeOffset movsblWithPatch(const Operand& src, Register dest) {
541 switch (src.kind()) {
542 case Operand::MEM_REG_DISP:
543 masm.movsbl_mr_disp32(src.disp(), src.base(), dest.encoding());
544 break;
545 case Operand::MEM_ADDRESS32:
546 masm.movsbl_mr(src.address(), dest.encoding());
547 break;
548 default:
549 MOZ_CRASH("unexpected operand kind");
550 }
551 return CodeOffset(masm.currentOffset());
552 }
movzblWithPatch(const Operand & src,Register dest)553 CodeOffset movzblWithPatch(const Operand& src, Register dest) {
554 switch (src.kind()) {
555 case Operand::MEM_REG_DISP:
556 masm.movzbl_mr_disp32(src.disp(), src.base(), dest.encoding());
557 break;
558 case Operand::MEM_ADDRESS32:
559 masm.movzbl_mr(src.address(), dest.encoding());
560 break;
561 default:
562 MOZ_CRASH("unexpected operand kind");
563 }
564 return CodeOffset(masm.currentOffset());
565 }
movswlWithPatch(const Operand & src,Register dest)566 CodeOffset movswlWithPatch(const Operand& src, Register dest) {
567 switch (src.kind()) {
568 case Operand::MEM_REG_DISP:
569 masm.movswl_mr_disp32(src.disp(), src.base(), dest.encoding());
570 break;
571 case Operand::MEM_ADDRESS32:
572 masm.movswl_mr(src.address(), dest.encoding());
573 break;
574 default:
575 MOZ_CRASH("unexpected operand kind");
576 }
577 return CodeOffset(masm.currentOffset());
578 }
movzwlWithPatch(const Operand & src,Register dest)579 CodeOffset movzwlWithPatch(const Operand& src, Register dest) {
580 switch (src.kind()) {
581 case Operand::MEM_REG_DISP:
582 masm.movzwl_mr_disp32(src.disp(), src.base(), dest.encoding());
583 break;
584 case Operand::MEM_ADDRESS32:
585 masm.movzwl_mr(src.address(), dest.encoding());
586 break;
587 default:
588 MOZ_CRASH("unexpected operand kind");
589 }
590 return CodeOffset(masm.currentOffset());
591 }
movlWithPatch(const Operand & src,Register dest)592 CodeOffset movlWithPatch(const Operand& src, Register dest) {
593 switch (src.kind()) {
594 case Operand::MEM_REG_DISP:
595 masm.movl_mr_disp32(src.disp(), src.base(), dest.encoding());
596 break;
597 case Operand::MEM_ADDRESS32:
598 masm.movl_mr(src.address(), dest.encoding());
599 break;
600 default:
601 MOZ_CRASH("unexpected operand kind");
602 }
603 return CodeOffset(masm.currentOffset());
604 }
vmovssWithPatch(const Operand & src,FloatRegister dest)605 CodeOffset vmovssWithPatch(const Operand& src, FloatRegister dest) {
606 MOZ_ASSERT(HasSSE2());
607 switch (src.kind()) {
608 case Operand::MEM_REG_DISP:
609 masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
610 break;
611 case Operand::MEM_ADDRESS32:
612 masm.vmovss_mr(src.address(), dest.encoding());
613 break;
614 default:
615 MOZ_CRASH("unexpected operand kind");
616 }
617 return CodeOffset(masm.currentOffset());
618 }
vmovdWithPatch(const Operand & src,FloatRegister dest)619 CodeOffset vmovdWithPatch(const Operand& src, FloatRegister dest) {
620 MOZ_ASSERT(HasSSE2());
621 switch (src.kind()) {
622 case Operand::MEM_REG_DISP:
623 masm.vmovd_mr_disp32(src.disp(), src.base(), dest.encoding());
624 break;
625 case Operand::MEM_ADDRESS32:
626 masm.vmovd_mr(src.address(), dest.encoding());
627 break;
628 default:
629 MOZ_CRASH("unexpected operand kind");
630 }
631 return CodeOffset(masm.currentOffset());
632 }
vmovqWithPatch(const Operand & src,FloatRegister dest)633 CodeOffset vmovqWithPatch(const Operand& src, FloatRegister dest) {
634 MOZ_ASSERT(HasSSE2());
635 switch (src.kind()) {
636 case Operand::MEM_REG_DISP:
637 masm.vmovq_mr_disp32(src.disp(), src.base(), dest.encoding());
638 break;
639 case Operand::MEM_ADDRESS32:
640 masm.vmovq_mr(src.address(), dest.encoding());
641 break;
642 default:
643 MOZ_CRASH("unexpected operand kind");
644 }
645 return CodeOffset(masm.currentOffset());
646 }
vmovsdWithPatch(const Operand & src,FloatRegister dest)647 CodeOffset vmovsdWithPatch(const Operand& src, FloatRegister dest) {
648 MOZ_ASSERT(HasSSE2());
649 switch (src.kind()) {
650 case Operand::MEM_REG_DISP:
651 masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
652 break;
653 case Operand::MEM_ADDRESS32:
654 masm.vmovsd_mr(src.address(), dest.encoding());
655 break;
656 default:
657 MOZ_CRASH("unexpected operand kind");
658 }
659 return CodeOffset(masm.currentOffset());
660 }
vmovupsWithPatch(const Operand & src,FloatRegister dest)661 CodeOffset vmovupsWithPatch(const Operand& src, FloatRegister dest) {
662 MOZ_ASSERT(HasSSE2());
663 switch (src.kind()) {
664 case Operand::MEM_REG_DISP:
665 masm.vmovups_mr_disp32(src.disp(), src.base(), dest.encoding());
666 break;
667 case Operand::MEM_ADDRESS32:
668 masm.vmovups_mr(src.address(), dest.encoding());
669 break;
670 default:
671 MOZ_CRASH("unexpected operand kind");
672 }
673 return CodeOffset(masm.currentOffset());
674 }
vmovdquWithPatch(const Operand & src,FloatRegister dest)675 CodeOffset vmovdquWithPatch(const Operand& src, FloatRegister dest) {
676 MOZ_ASSERT(HasSSE2());
677 switch (src.kind()) {
678 case Operand::MEM_REG_DISP:
679 masm.vmovdqu_mr_disp32(src.disp(), src.base(), dest.encoding());
680 break;
681 case Operand::MEM_ADDRESS32:
682 masm.vmovdqu_mr(src.address(), dest.encoding());
683 break;
684 default:
685 MOZ_CRASH("unexpected operand kind");
686 }
687 return CodeOffset(masm.currentOffset());
688 }
689
690 // Store to *(base + disp32) where disp32 can be patched.
movbWithPatch(Register src,const Operand & dest)691 CodeOffset movbWithPatch(Register src, const Operand& dest) {
692 switch (dest.kind()) {
693 case Operand::MEM_REG_DISP:
694 masm.movb_rm_disp32(src.encoding(), dest.disp(), dest.base());
695 break;
696 case Operand::MEM_ADDRESS32:
697 masm.movb_rm(src.encoding(), dest.address());
698 break;
699 default:
700 MOZ_CRASH("unexpected operand kind");
701 }
702 return CodeOffset(masm.currentOffset());
703 }
movwWithPatch(Register src,const Operand & dest)704 CodeOffset movwWithPatch(Register src, const Operand& dest) {
705 switch (dest.kind()) {
706 case Operand::MEM_REG_DISP:
707 masm.movw_rm_disp32(src.encoding(), dest.disp(), dest.base());
708 break;
709 case Operand::MEM_ADDRESS32:
710 masm.movw_rm(src.encoding(), dest.address());
711 break;
712 default:
713 MOZ_CRASH("unexpected operand kind");
714 }
715 return CodeOffset(masm.currentOffset());
716 }
movlWithPatch(Register src,const Operand & dest)717 CodeOffset movlWithPatch(Register src, const Operand& dest) {
718 switch (dest.kind()) {
719 case Operand::MEM_REG_DISP:
720 masm.movl_rm_disp32(src.encoding(), dest.disp(), dest.base());
721 break;
722 case Operand::MEM_ADDRESS32:
723 masm.movl_rm(src.encoding(), dest.address());
724 break;
725 default:
726 MOZ_CRASH("unexpected operand kind");
727 }
728 return CodeOffset(masm.currentOffset());
729 }
movlWithPatchLow(Register regLow,const Operand & dest)730 CodeOffset movlWithPatchLow(Register regLow, const Operand& dest) {
731 switch (dest.kind()) {
732 case Operand::MEM_REG_DISP: {
733 Address addr = dest.toAddress();
734 Operand low(addr.base, addr.offset + INT64LOW_OFFSET);
735 return movlWithPatch(regLow, low);
736 }
737 case Operand::MEM_ADDRESS32: {
738 Operand low(PatchedAbsoluteAddress(uint32_t(dest.address()) + INT64LOW_OFFSET));
739 return movlWithPatch(regLow, low);
740 }
741 default:
742 MOZ_CRASH("unexpected operand kind");
743 }
744 }
movlWithPatchHigh(Register regHigh,const Operand & dest)745 CodeOffset movlWithPatchHigh(Register regHigh, const Operand& dest) {
746 switch (dest.kind()) {
747 case Operand::MEM_REG_DISP: {
748 Address addr = dest.toAddress();
749 Operand high(addr.base, addr.offset + INT64HIGH_OFFSET);
750 return movlWithPatch(regHigh, high);
751 }
752 case Operand::MEM_ADDRESS32: {
753 Operand high(PatchedAbsoluteAddress(uint32_t(dest.address()) + INT64HIGH_OFFSET));
754 return movlWithPatch(regHigh, high);
755 }
756 default:
757 MOZ_CRASH("unexpected operand kind");
758 }
759 }
vmovdWithPatch(FloatRegister src,const Operand & dest)760 CodeOffset vmovdWithPatch(FloatRegister src, const Operand& dest) {
761 MOZ_ASSERT(HasSSE2());
762 switch (dest.kind()) {
763 case Operand::MEM_REG_DISP:
764 masm.vmovd_rm_disp32(src.encoding(), dest.disp(), dest.base());
765 break;
766 case Operand::MEM_ADDRESS32:
767 masm.vmovd_rm(src.encoding(), dest.address());
768 break;
769 default:
770 MOZ_CRASH("unexpected operand kind");
771 }
772 return CodeOffset(masm.currentOffset());
773 }
vmovqWithPatch(FloatRegister src,const Operand & dest)774 CodeOffset vmovqWithPatch(FloatRegister src, const Operand& dest) {
775 MOZ_ASSERT(HasSSE2());
776 switch (dest.kind()) {
777 case Operand::MEM_REG_DISP:
778 masm.vmovq_rm_disp32(src.encoding(), dest.disp(), dest.base());
779 break;
780 case Operand::MEM_ADDRESS32:
781 masm.vmovq_rm(src.encoding(), dest.address());
782 break;
783 default:
784 MOZ_CRASH("unexpected operand kind");
785 }
786 return CodeOffset(masm.currentOffset());
787 }
vmovssWithPatch(FloatRegister src,const Operand & dest)788 CodeOffset vmovssWithPatch(FloatRegister src, const Operand& dest) {
789 MOZ_ASSERT(HasSSE2());
790 switch (dest.kind()) {
791 case Operand::MEM_REG_DISP:
792 masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
793 break;
794 case Operand::MEM_ADDRESS32:
795 masm.vmovss_rm(src.encoding(), dest.address());
796 break;
797 default:
798 MOZ_CRASH("unexpected operand kind");
799 }
800 return CodeOffset(masm.currentOffset());
801 }
vmovsdWithPatch(FloatRegister src,const Operand & dest)802 CodeOffset vmovsdWithPatch(FloatRegister src, const Operand& dest) {
803 MOZ_ASSERT(HasSSE2());
804 switch (dest.kind()) {
805 case Operand::MEM_REG_DISP:
806 masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
807 break;
808 case Operand::MEM_ADDRESS32:
809 masm.vmovsd_rm(src.encoding(), dest.address());
810 break;
811 default:
812 MOZ_CRASH("unexpected operand kind");
813 }
814 return CodeOffset(masm.currentOffset());
815 }
vmovupsWithPatch(FloatRegister src,const Operand & dest)816 CodeOffset vmovupsWithPatch(FloatRegister src, const Operand& dest) {
817 MOZ_ASSERT(HasSSE2());
818 switch (dest.kind()) {
819 case Operand::MEM_REG_DISP:
820 masm.vmovups_rm_disp32(src.encoding(), dest.disp(), dest.base());
821 break;
822 case Operand::MEM_ADDRESS32:
823 masm.vmovups_rm(src.encoding(), dest.address());
824 break;
825 default:
826 MOZ_CRASH("unexpected operand kind");
827 }
828 return CodeOffset(masm.currentOffset());
829 }
vmovdquWithPatch(FloatRegister src,const Operand & dest)830 CodeOffset vmovdquWithPatch(FloatRegister src, const Operand& dest) {
831 MOZ_ASSERT(HasSSE2());
832 switch (dest.kind()) {
833 case Operand::MEM_REG_DISP:
834 masm.vmovdqu_rm_disp32(src.encoding(), dest.disp(), dest.base());
835 break;
836 case Operand::MEM_ADDRESS32:
837 masm.vmovdqu_rm(src.encoding(), dest.address());
838 break;
839 default:
840 MOZ_CRASH("unexpected operand kind");
841 }
842 return CodeOffset(masm.currentOffset());
843 }
844
845 // Load from *(addr + index*scale) where addr can be patched.
movlWithPatch(PatchedAbsoluteAddress addr,Register index,Scale scale,Register dest)846 CodeOffset movlWithPatch(PatchedAbsoluteAddress addr, Register index, Scale scale,
847 Register dest)
848 {
849 masm.movl_mr(addr.addr, index.encoding(), scale, dest.encoding());
850 return CodeOffset(masm.currentOffset());
851 }
852
853 // Load from *src where src can be patched.
movsblWithPatch(PatchedAbsoluteAddress src,Register dest)854 CodeOffset movsblWithPatch(PatchedAbsoluteAddress src, Register dest) {
855 masm.movsbl_mr(src.addr, dest.encoding());
856 return CodeOffset(masm.currentOffset());
857 }
movzblWithPatch(PatchedAbsoluteAddress src,Register dest)858 CodeOffset movzblWithPatch(PatchedAbsoluteAddress src, Register dest) {
859 masm.movzbl_mr(src.addr, dest.encoding());
860 return CodeOffset(masm.currentOffset());
861 }
movswlWithPatch(PatchedAbsoluteAddress src,Register dest)862 CodeOffset movswlWithPatch(PatchedAbsoluteAddress src, Register dest) {
863 masm.movswl_mr(src.addr, dest.encoding());
864 return CodeOffset(masm.currentOffset());
865 }
movzwlWithPatch(PatchedAbsoluteAddress src,Register dest)866 CodeOffset movzwlWithPatch(PatchedAbsoluteAddress src, Register dest) {
867 masm.movzwl_mr(src.addr, dest.encoding());
868 return CodeOffset(masm.currentOffset());
869 }
movlWithPatch(PatchedAbsoluteAddress src,Register dest)870 CodeOffset movlWithPatch(PatchedAbsoluteAddress src, Register dest) {
871 masm.movl_mr(src.addr, dest.encoding());
872 return CodeOffset(masm.currentOffset());
873 }
vmovssWithPatch(PatchedAbsoluteAddress src,FloatRegister dest)874 CodeOffset vmovssWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
875 MOZ_ASSERT(HasSSE2());
876 masm.vmovss_mr(src.addr, dest.encoding());
877 return CodeOffset(masm.currentOffset());
878 }
vmovdWithPatch(PatchedAbsoluteAddress src,FloatRegister dest)879 CodeOffset vmovdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
880 MOZ_ASSERT(HasSSE2());
881 masm.vmovd_mr(src.addr, dest.encoding());
882 return CodeOffset(masm.currentOffset());
883 }
vmovqWithPatch(PatchedAbsoluteAddress src,FloatRegister dest)884 CodeOffset vmovqWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
885 MOZ_ASSERT(HasSSE2());
886 masm.vmovq_mr(src.addr, dest.encoding());
887 return CodeOffset(masm.currentOffset());
888 }
vmovsdWithPatch(PatchedAbsoluteAddress src,FloatRegister dest)889 CodeOffset vmovsdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
890 MOZ_ASSERT(HasSSE2());
891 masm.vmovsd_mr(src.addr, dest.encoding());
892 return CodeOffset(masm.currentOffset());
893 }
vmovdqaWithPatch(PatchedAbsoluteAddress src,FloatRegister dest)894 CodeOffset vmovdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
895 MOZ_ASSERT(HasSSE2());
896 masm.vmovdqa_mr(src.addr, dest.encoding());
897 return CodeOffset(masm.currentOffset());
898 }
vmovdquWithPatch(PatchedAbsoluteAddress src,FloatRegister dest)899 CodeOffset vmovdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
900 MOZ_ASSERT(HasSSE2());
901 masm.vmovdqu_mr(src.addr, dest.encoding());
902 return CodeOffset(masm.currentOffset());
903 }
vmovapsWithPatch(PatchedAbsoluteAddress src,FloatRegister dest)904 CodeOffset vmovapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
905 MOZ_ASSERT(HasSSE2());
906 masm.vmovaps_mr(src.addr, dest.encoding());
907 return CodeOffset(masm.currentOffset());
908 }
vmovupsWithPatch(PatchedAbsoluteAddress src,FloatRegister dest)909 CodeOffset vmovupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
910 MOZ_ASSERT(HasSSE2());
911 masm.vmovups_mr(src.addr, dest.encoding());
912 return CodeOffset(masm.currentOffset());
913 }
914
915 // Store to *dest where dest can be patched.
movbWithPatch(Register src,PatchedAbsoluteAddress dest)916 CodeOffset movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
917 masm.movb_rm(src.encoding(), dest.addr);
918 return CodeOffset(masm.currentOffset());
919 }
movwWithPatch(Register src,PatchedAbsoluteAddress dest)920 CodeOffset movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
921 masm.movw_rm(src.encoding(), dest.addr);
922 return CodeOffset(masm.currentOffset());
923 }
movlWithPatch(Register src,PatchedAbsoluteAddress dest)924 CodeOffset movlWithPatch(Register src, PatchedAbsoluteAddress dest) {
925 masm.movl_rm(src.encoding(), dest.addr);
926 return CodeOffset(masm.currentOffset());
927 }
vmovssWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)928 CodeOffset vmovssWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
929 MOZ_ASSERT(HasSSE2());
930 masm.vmovss_rm(src.encoding(), dest.addr);
931 return CodeOffset(masm.currentOffset());
932 }
vmovdWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)933 CodeOffset vmovdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
934 MOZ_ASSERT(HasSSE2());
935 masm.vmovd_rm(src.encoding(), dest.addr);
936 return CodeOffset(masm.currentOffset());
937 }
vmovqWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)938 CodeOffset vmovqWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
939 MOZ_ASSERT(HasSSE2());
940 masm.vmovq_rm(src.encoding(), dest.addr);
941 return CodeOffset(masm.currentOffset());
942 }
vmovsdWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)943 CodeOffset vmovsdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
944 MOZ_ASSERT(HasSSE2());
945 masm.vmovsd_rm(src.encoding(), dest.addr);
946 return CodeOffset(masm.currentOffset());
947 }
vmovdqaWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)948 CodeOffset vmovdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
949 MOZ_ASSERT(HasSSE2());
950 masm.vmovdqa_rm(src.encoding(), dest.addr);
951 return CodeOffset(masm.currentOffset());
952 }
vmovapsWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)953 CodeOffset vmovapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
954 MOZ_ASSERT(HasSSE2());
955 masm.vmovaps_rm(src.encoding(), dest.addr);
956 return CodeOffset(masm.currentOffset());
957 }
vmovdquWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)958 CodeOffset vmovdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
959 MOZ_ASSERT(HasSSE2());
960 masm.vmovdqu_rm(src.encoding(), dest.addr);
961 return CodeOffset(masm.currentOffset());
962 }
vmovupsWithPatch(FloatRegister src,PatchedAbsoluteAddress dest)963 CodeOffset vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
964 MOZ_ASSERT(HasSSE2());
965 masm.vmovups_rm(src.encoding(), dest.addr);
966 return CodeOffset(masm.currentOffset());
967 }
968
canUseInSingleByteInstruction(Register reg)969 static bool canUseInSingleByteInstruction(Register reg) {
970 return X86Encoding::HasSubregL(reg.encoding());
971 }
972 };
973
974 // Get a register in which we plan to put a quantity that will be used as an
975 // integer argument. This differs from GetIntArgReg in that if we have no more
976 // actual argument registers to use we will fall back on using whatever
977 // CallTempReg* don't overlap the argument registers, and only fail once those
978 // run out too.
979 static inline bool
GetTempRegForIntArg(uint32_t usedIntArgs,uint32_t usedFloatArgs,Register * out)980 GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
981 {
982 if (usedIntArgs >= NumCallTempNonArgRegs)
983 return false;
984 *out = CallTempNonArgRegs[usedIntArgs];
985 return true;
986 }
987
988 } // namespace jit
989 } // namespace js
990
991 #endif /* jit_x86_Assembler_x86_h */
992