/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x86/CodeGenerator-x86.h"

#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"

#include "jsnum.h"

#include "jit/IonCaches.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/Shape.h"

#include "jsscriptinlines.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::BitwiseCast;
using mozilla::DebugOnly;
using mozilla::FloatingPoint;
using JS::GenericNaN;

CodeGeneratorX86::CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
  : CodeGeneratorX86Shared(gen, graph, masm)
{
}

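// Frame depths are bucketed into a small set of fixed size classes so that
// per-class code (such as bailout handling) can be shared, instead of being
// specialized for every possible frame depth.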
static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };

FrameSizeClass
FrameSizeClass::FromDepth(uint32_t frameDepth)
{
    for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
        if (frameDepth < FrameSizes[i])
            return FrameSizeClass(i);
    }

    return FrameSizeClass::None();
}

FrameSizeClass
FrameSizeClass::ClassLimit()
{
    return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
}

uint32_t
FrameSizeClass::frameSize() const
{
    MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
    MOZ_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));

    return FrameSizes[class_];
}

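// On 32-bit x86 a JS Value occupies two GPRs: one holding the type tag and
// one holding the payload (the NUNBOX32 layout). The helpers below gather
// such register pairs from an instruction's operands, definitions, or temps.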
ValueOperand
CodeGeneratorX86::ToValue(LInstruction* ins, size_t pos)
{
    Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
    Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
    return ValueOperand(typeReg, payloadReg);
}

ValueOperand
CodeGeneratorX86::ToOutValue(LInstruction* ins)
{
    Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
    Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
    return ValueOperand(typeReg, payloadReg);
}

ValueOperand
CodeGeneratorX86::ToTempValue(LInstruction* ins, size_t pos)
{
    Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
    Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
    return ValueOperand(typeReg, payloadReg);
}

void
CodeGeneratorX86::visitValue(LValue* value)
{
    const ValueOperand out = ToOutValue(value);
    masm.moveValue(value->value(), out);
}

void
CodeGeneratorX86::visitBox(LBox* box)
{
    const LDefinition* type = box->getDef(TYPE_INDEX);

    DebugOnly<const LAllocation*> a = box->getOperand(0);
    MOZ_ASSERT(!a->isConstant());

    // On x86, the input operand and the output payload have the same
    // virtual register. All that needs to be written is the type tag for
    // the type definition.
    masm.mov(ImmWord(MIRTypeToTag(box->type())), ToRegister(type));
}

void
CodeGeneratorX86::visitBoxFloatingPoint(LBoxFloatingPoint* box)
{
    const LAllocation* in = box->getOperand(0);
    const ValueOperand out = ToOutValue(box);

    FloatRegister reg = ToFloatRegister(in);
    if (box->type() == MIRType_Float32) {
        masm.convertFloat32ToDouble(reg, ScratchFloat32Reg);
        reg = ScratchFloat32Reg;
    }
    masm.boxDouble(reg, out);
}

void
CodeGeneratorX86::visitUnbox(LUnbox* unbox)
{
    // Note that for unbox, the type and payload indexes are switched on the
    // inputs.
    MUnbox* mir = unbox->mir();

    if (mir->fallible()) {
        masm.cmp32(ToOperand(unbox->type()), Imm32(MIRTypeToTag(mir->type())));
        bailoutIf(Assembler::NotEqual, unbox->snapshot());
    }
}

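// Strict equality of a value against a boolean: if the lhs is not a boolean,
// the result is known immediately (false for JSOP_STRICTEQ, true for
// JSOP_STRICTNE); otherwise compare the payloads.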
void
CodeGeneratorX86::visitCompareB(LCompareB* lir)
{
    MCompare* mir = lir->mir();

    const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
    const LAllocation* rhs = lir->rhs();
    const Register output = ToRegister(lir->output());

    MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    Label notBoolean, done;
    masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
    {
        if (rhs->isConstant())
            masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
        else
            masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
        masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
        masm.jump(&done);
    }
    masm.bind(&notBoolean);
    {
        masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
    }

    masm.bind(&done);
}

void
CodeGeneratorX86::visitCompareBAndBranch(LCompareBAndBranch* lir)
{
    MCompare* mir = lir->cmpMir();
    const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
    const LAllocation* rhs = lir->rhs();

    MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
    jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(), cond);

    if (rhs->isConstant())
        masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
    else
        masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
    emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
}

void
CodeGeneratorX86::visitCompareBitwise(LCompareBitwise* lir)
{
    MCompare* mir = lir->mir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
    const Register output = ToRegister(lir->output());

    MOZ_ASSERT(IsEqualityOp(mir->jsop()));

    Label notEqual, done;
    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    masm.j(Assembler::NotEqual, &notEqual);
    {
        masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
        masm.emitSet(cond, output);
        masm.jump(&done);
    }
    masm.bind(&notEqual);
    {
        masm.move32(Imm32(cond == Assembler::NotEqual), output);
    }

    masm.bind(&done);
}

void
CodeGeneratorX86::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
{
    MCompare* mir = lir->cmpMir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);

    MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
               mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    MBasicBlock* notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();

    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    jumpToBlock(notEqual, Assembler::NotEqual);
    masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
    emitBranch(cond, lir->ifTrue(), lir->ifFalse());
}

void
CodeGeneratorX86::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble* lir)
{
    Register input = ToRegister(lir->input());
    Register temp = ToRegister(lir->temp());

    if (input != temp)
        masm.mov(input, temp);

    // Beware: convertUInt32ToDouble clobbers input.
    masm.convertUInt32ToDouble(temp, ToFloatRegister(lir->output()));
}

void
CodeGeneratorX86::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32* lir)
{
    Register input = ToRegister(lir->input());
    Register temp = ToRegister(lir->temp());
    FloatRegister output = ToFloatRegister(lir->output());

    if (input != temp)
        masm.mov(input, temp);

    // Beware: convertUInt32ToFloat32 clobbers input.
    masm.convertUInt32ToFloat32(temp, output);
}

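// Emit a patchable load of the given scalar type: the *WithPatch forms record
// the code offset of the instruction so the effective address can be patched
// once the heap base is known.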
void
CodeGeneratorX86::load(Scalar::Type accessType, const Operand& srcAddr, const LDefinition* out)
{
    switch (accessType) {
      case Scalar::Int8:         masm.movsblWithPatch(srcAddr, ToRegister(out)); break;
      case Scalar::Uint8Clamped:
      case Scalar::Uint8:        masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
      case Scalar::Int16:        masm.movswlWithPatch(srcAddr, ToRegister(out)); break;
      case Scalar::Uint16:       masm.movzwlWithPatch(srcAddr, ToRegister(out)); break;
      case Scalar::Int32:
      case Scalar::Uint32:       masm.movlWithPatch(srcAddr, ToRegister(out)); break;
      case Scalar::Float32:      masm.vmovssWithPatch(srcAddr, ToFloatRegister(out)); break;
      case Scalar::Float64:      masm.vmovsdWithPatch(srcAddr, ToFloatRegister(out)); break;
      case Scalar::Float32x4:
      case Scalar::Int32x4:      MOZ_CRASH("SIMD loads should be handled in their own function");
      case Scalar::MaxTypedArrayViewType: MOZ_CRASH("unexpected type");
    }
}

void
CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
{
    const MLoadTypedArrayElementStatic* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType_Float32);

    Register ptr = ToRegister(ins->ptr());
    const LDefinition* out = ins->output();
    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
    uint32_t offset = mir->offset();

    if (mir->needsBoundsCheck()) {
        MOZ_ASSERT(offset == 0);
        if (!mir->fallible()) {
            ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
            addOutOfLineCode(ool, ins->mir());
        }

        masm.cmpPtr(ptr, ImmWord(mir->length()));
        if (ool)
            masm.j(Assembler::AboveOrEqual, ool->entry());
        else
            bailoutIf(Assembler::AboveOrEqual, ins->snapshot());
    }

    Operand srcAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
    load(accessType, srcAddr, out);
    if (accessType == Scalar::Float64)
        masm.canonicalizeDouble(ToFloatRegister(out));
    if (accessType == Scalar::Float32)
        masm.canonicalizeFloat(ToFloatRegister(out));
    if (ool)
        masm.bind(ool->rejoin());
}

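// asm.js builtin calls follow the system ABI, which on x86 returns floating
// point results on the x87 stack; spill the result to the stack and reload it
// into the XMM register the rest of the code expects.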
void
CodeGeneratorX86::visitAsmJSCall(LAsmJSCall* ins)
{
    MAsmJSCall* mir = ins->mir();

    emitAsmJSCall(ins);

    if (IsFloatingPointType(mir->type()) && mir->callee().which() == MAsmJSCall::Callee::Builtin) {
        if (mir->type() == MIRType_Float32) {
            masm.reserveStack(sizeof(float));
            Operand op(esp, 0);
            masm.fstp32(op);
            masm.loadFloat32(op, ReturnFloat32Reg);
            masm.freeStack(sizeof(float));
        } else {
            MOZ_ASSERT(mir->type() == MIRType_Double);
            masm.reserveStack(sizeof(double));
            Operand op(esp, 0);
            masm.fstp(op);
            masm.loadDouble(op, ReturnDoubleReg);
            masm.freeStack(sizeof(double));
        }
    }
}

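// x86 has a strong (TSO) memory model: of the possible reorderings, only
// StoreLoad can be observed, so only that barrier needs an actual fence.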
void
CodeGeneratorX86::memoryBarrier(MemoryBarrierBits barrier)
{
    if (barrier & MembarStoreLoad)
        masm.storeLoadFence();
}

void
CodeGeneratorX86::loadSimd(Scalar::Type type, unsigned numElems, const Operand& srcAddr,
                           FloatRegister out)
{
    switch (type) {
      case Scalar::Float32x4: {
        switch (numElems) {
          // In memory-to-register mode, movss zeroes out the high lanes.
          case 1: masm.vmovssWithPatch(srcAddr, out); break;
          // See comment above, which also applies to movsd.
          case 2: masm.vmovsdWithPatch(srcAddr, out); break;
          case 4: masm.vmovupsWithPatch(srcAddr, out); break;
          default: MOZ_CRASH("unexpected size for partial load");
        }
        break;
      }
      case Scalar::Int32x4: {
        switch (numElems) {
          // In memory-to-register mode, movd zeroes out the high lanes.
          case 1: masm.vmovdWithPatch(srcAddr, out); break;
          // See comment above, which also applies to movq.
          case 2: masm.vmovqWithPatch(srcAddr, out); break;
          case 4: masm.vmovdquWithPatch(srcAddr, out); break;
          default: MOZ_CRASH("unexpected size for partial load");
        }
        break;
      }
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
      case Scalar::Float64:
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("should only handle SIMD types");
    }
}

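// A bogus pointer allocation means the access address is a compile-time
// constant; in that case the address is emitted as a patched absolute
// address rather than a register-relative operand.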
void
CodeGeneratorX86::emitSimdLoad(LAsmJSLoadHeap* ins)
{
    const MAsmJSLoadHeap* mir = ins->mir();
    Scalar::Type type = mir->accessType();
    FloatRegister out = ToFloatRegister(ins->output());
    const LAllocation* ptr = ins->ptr();
    Operand srcAddr = ptr->isBogus()
                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
                      : Operand(ToRegister(ptr), mir->offset());

    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
    if (gen->needsAsmJSBoundsCheckBranch(mir))
        maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr),
                                                    masm.asmOnOutOfBoundsLabel());

    unsigned numElems = mir->numSimdElems();
    if (numElems == 3) {
        MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);

        Operand srcAddrZ =
            ptr->isBogus()
            ? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset()))
            : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());

        // Load XY
        uint32_t before = masm.size();
        loadSimd(type, 2, srcAddr, out);
        uint32_t after = masm.size();
        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));

        // Load Z (W is zeroed)
        // This is still in bounds, as we've checked with a manual bounds check
        // or we had enough space for sure when removing the bounds check.
        before = after;
        loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg);
        after = masm.size();
        masm.append(wasm::HeapAccess(before, after));

        // Move ZW atop XY
        masm.vmovlhps(ScratchSimd128Reg, out, out);
    } else {
        uint32_t before = masm.size();
        loadSimd(type, numElems, srcAddr, out);
        uint32_t after = masm.size();
        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
    }

    if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}

void
CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
{
    const MAsmJSLoadHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();

    if (Scalar::isSimdType(accessType))
        return emitSimdLoad(ins);

    const LAllocation* ptr = ins->ptr();
    const LDefinition* out = ins->output();
    Operand srcAddr = ptr->isBogus()
                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
                      : Operand(ToRegister(ptr), mir->offset());

    memoryBarrier(mir->barrierBefore());
    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
    if (gen->needsAsmJSBoundsCheckBranch(mir)) {
        Label* jumpTo = nullptr;
        if (mir->isAtomicAccess()) {
            jumpTo = masm.asmOnOutOfBoundsLabel();
        } else {
            ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
            addOutOfLineCode(ool, mir);
            jumpTo = ool->entry();
        }
        maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr), jumpTo);
    }

    uint32_t before = masm.size();
    load(accessType, srcAddr, out);
    uint32_t after = masm.size();
    if (ool) {
        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
        masm.bind(ool->rejoin());
    }
    memoryBarrier(mir->barrierAfter());
    masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
}

void
CodeGeneratorX86::store(Scalar::Type accessType, const LAllocation* value, const Operand& dstAddr)
{
    switch (accessType) {
      case Scalar::Int8:
      case Scalar::Uint8Clamped:
      case Scalar::Uint8:        masm.movbWithPatch(ToRegister(value), dstAddr); break;
      case Scalar::Int16:
      case Scalar::Uint16:       masm.movwWithPatch(ToRegister(value), dstAddr); break;
      case Scalar::Int32:
      case Scalar::Uint32:       masm.movlWithPatch(ToRegister(value), dstAddr); break;
      case Scalar::Float32:      masm.vmovssWithPatch(ToFloatRegister(value), dstAddr); break;
      case Scalar::Float64:      masm.vmovsdWithPatch(ToFloatRegister(value), dstAddr); break;
      case Scalar::Float32x4:
      case Scalar::Int32x4:      MOZ_CRASH("SIMD stores should be handled in emitSimdStore");
      case Scalar::MaxTypedArrayViewType: MOZ_CRASH("unexpected type");
    }
}

void
CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
{
    MStoreTypedArrayElementStatic* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    Register ptr = ToRegister(ins->ptr());
    const LAllocation* value = ins->value();
    uint32_t offset = mir->offset();

    if (!mir->needsBoundsCheck()) {
        Operand dstAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
        store(accessType, value, dstAddr);
        return;
    }

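    // Out-of-bounds stores are simply dropped: branch over the store rather
    // than bailing out.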
    MOZ_ASSERT(offset == 0);
    masm.cmpPtr(ptr, ImmWord(mir->length()));
    Label rejoin;
    masm.j(Assembler::AboveOrEqual, &rejoin);

    Operand dstAddr(ptr, int32_t(mir->base().asValue()));
    store(accessType, value, dstAddr);
    masm.bind(&rejoin);
}

void
CodeGeneratorX86::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in,
                            const Operand& dstAddr)
{
    switch (type) {
      case Scalar::Float32x4: {
        switch (numElems) {
          // movss stores only the low lane.
          case 1: masm.vmovssWithPatch(in, dstAddr); break;
          // movsd stores only the low two lanes.
          case 2: masm.vmovsdWithPatch(in, dstAddr); break;
          case 4: masm.vmovupsWithPatch(in, dstAddr); break;
          default: MOZ_CRASH("unexpected size for partial store");
        }
        break;
      }
      case Scalar::Int32x4: {
        switch (numElems) {
          // movd stores only the low lane.
          case 1: masm.vmovdWithPatch(in, dstAddr); break;
          // movq stores only the low two lanes.
          case 2: masm.vmovqWithPatch(in, dstAddr); break;
          case 4: masm.vmovdquWithPatch(in, dstAddr); break;
          default: MOZ_CRASH("unexpected size for partial store");
        }
        break;
      }
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
      case Scalar::Float64:
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("should only handle SIMD types");
    }
}

void
CodeGeneratorX86::emitSimdStore(LAsmJSStoreHeap* ins)
{
    const MAsmJSStoreHeap* mir = ins->mir();
    Scalar::Type type = mir->accessType();
    FloatRegister in = ToFloatRegister(ins->value());
    const LAllocation* ptr = ins->ptr();
    Operand dstAddr = ptr->isBogus()
                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
                      : Operand(ToRegister(ptr), mir->offset());

    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
    if (gen->needsAsmJSBoundsCheckBranch(mir))
        maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr),
                                                    masm.asmOnOutOfBoundsLabel());

    unsigned numElems = mir->numSimdElems();
    if (numElems == 3) {
        MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);

        Operand dstAddrZ =
            ptr->isBogus()
            ? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset()))
            : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());

        // Store XY
        uint32_t before = masm.size();
        storeSimd(type, 2, in, dstAddr);
        uint32_t after = masm.size();
        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));

        // Move ZW into the low lanes of the scratch register.
        masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);

        // Store Z; a 1-element store writes only the low lane, so W is never
        // written to memory.
        // This is still in bounds, as we've checked with a manual bounds check
        // or we had enough space for sure when removing the bounds check.
        before = masm.size();
        storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ);
        after = masm.size();
        masm.append(wasm::HeapAccess(before, after));
    } else {
        uint32_t before = masm.size();
        storeSimd(type, numElems, in, dstAddr);
        uint32_t after = masm.size();
        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
    }

    if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}

void
CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
{
    const MAsmJSStoreHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();

    if (Scalar::isSimdType(accessType))
        return emitSimdStore(ins);

    const LAllocation* value = ins->value();
    const LAllocation* ptr = ins->ptr();
    Operand dstAddr = ptr->isBogus()
                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
                      : Operand(ToRegister(ptr), mir->offset());

    memoryBarrier(mir->barrierBefore());
    Label* rejoin = nullptr;
    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
    if (gen->needsAsmJSBoundsCheckBranch(mir)) {
        Label* jumpTo = nullptr;
        if (mir->isAtomicAccess())
            jumpTo = masm.asmOnOutOfBoundsLabel();
        else
            rejoin = jumpTo = alloc().lifoAlloc()->newInfallible<Label>();
        maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr), jumpTo);
    }

    uint32_t before = masm.size();
    store(accessType, value, dstAddr);
    uint32_t after = masm.size();
    if (rejoin) {
        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
        masm.bind(rejoin);
    }
    memoryBarrier(mir->barrierAfter());
    masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
}

void
CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
    MAsmJSCompareExchangeHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());
    Register addrTemp = ToRegister(ins->addrTemp());

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset());

    Address memAddr(addrTemp, mir->offset());
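    // Uint32 is mapped to Int32 here: the atomic operates on the same bits,
    // and any unsigned reinterpretation of the result is the caller's
    // responsibility.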
    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        memAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
}

// Perform bounds checking on the access if necessary; if it fails,
// jump to out-of-line code that throws. If the bounds check passes,
// set up the heap address in addrTemp.

void
CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg, bool boundsCheck,
                                            int32_t offset, int32_t endOffset)
{
    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;

    if (boundsCheck) {
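        // The comparison immediate is patched at link time with the heap
        // length, so starting from -endOffset makes the effective check
        // ptrReg > heapLength - endOffset.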
        maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-endOffset)).offset();
        masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
    }

    // Add in the actual heap pointer explicitly, to avoid opening up
    // the abstraction that is atomicBinopToTypedIntArray at this time.
    masm.movl(ptrReg, addrTemp);
    uint32_t before = masm.size();
    masm.addlWithPatch(Imm32(offset), addrTemp);
    uint32_t after = masm.size();
    masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
}

void
CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
{
    MAsmJSAtomicExchangeHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    Register value = ToRegister(ins->value());
    Register addrTemp = ToRegister(ins->addrTemp());

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset());

    Address memAddr(addrTemp, mir->offset());
    masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                       memAddr,
                                       value,
                                       InvalidReg,
                                       ToAnyRegister(ins->output()));
}

void
CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
{
    MAsmJSAtomicBinopHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
    Register addrTemp = ToRegister(ins->addrTemp());
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset());

    Address memAddr(addrTemp, mir->offset());
    if (value->isConstant()) {
        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                   Imm32(ToInt32(value)),
                                   memAddr,
                                   temp,
                                   InvalidReg,
                                   ToAnyRegister(ins->output()));
    } else {
        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                   ToRegister(value),
                                   memAddr,
                                   temp,
                                   InvalidReg,
                                   ToAnyRegister(ins->output()));
    }
}

void
CodeGeneratorX86::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
{
    MAsmJSAtomicBinopHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    Register addrTemp = ToRegister(ins->addrTemp());
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();

    MOZ_ASSERT(!mir->hasUses());

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset());

    Address memAddr(addrTemp, mir->offset());
    if (value->isConstant())
        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr);
    else
        atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr);
}

void
CodeGeneratorX86::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
{
    MAsmJSLoadGlobalVar* mir = ins->mir();
    MIRType type = mir->type();
    MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));

    CodeOffset label;
    switch (type) {
      case MIRType_Int32:
        label = masm.movlWithPatch(PatchedAbsoluteAddress(), ToRegister(ins->output()));
        break;
      case MIRType_Float32:
        label = masm.vmovssWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
        break;
      case MIRType_Double:
        label = masm.vmovsdWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
        break;
      // Aligned access: code is aligned on PageSize + there is padding
      // before the global data section.
      case MIRType_Int32x4:
        label = masm.vmovdqaWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
        break;
      case MIRType_Float32x4:
        label = masm.vmovapsWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
        break;
      default:
        MOZ_CRASH("unexpected type in visitAsmJSLoadGlobalVar");
    }
    masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
}

void
CodeGeneratorX86::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar* ins)
{
    MAsmJSStoreGlobalVar* mir = ins->mir();

    MIRType type = mir->value()->type();
    MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));

    CodeOffset label;
    switch (type) {
      case MIRType_Int32:
        label = masm.movlWithPatch(ToRegister(ins->value()), PatchedAbsoluteAddress());
        break;
      case MIRType_Float32:
        label = masm.vmovssWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
        break;
      case MIRType_Double:
        label = masm.vmovsdWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
        break;
      // Aligned access: code is aligned on PageSize + there is padding
      // before the global data section.
      case MIRType_Int32x4:
        label = masm.vmovdqaWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
        break;
      case MIRType_Float32x4:
        label = masm.vmovapsWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
        break;
      default:
        MOZ_CRASH("unexpected type in visitAsmJSStoreGlobalVar");
    }
    masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
}

void
CodeGeneratorX86::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr* ins)
{
    MAsmJSLoadFuncPtr* mir = ins->mir();

    Register index = ToRegister(ins->index());
    Register out = ToRegister(ins->output());
    CodeOffset label = masm.movlWithPatch(PatchedAbsoluteAddress(), index, TimesFour, out);
    masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
}

void
CodeGeneratorX86::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc* ins)
{
    MAsmJSLoadFFIFunc* mir = ins->mir();

    Register out = ToRegister(ins->output());
    CodeOffset label = masm.movlWithPatch(PatchedAbsoluteAddress(), out);
    masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
}

namespace js {
namespace jit {

class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86>
{
    LTruncateDToInt32* ins_;

  public:
    explicit OutOfLineTruncate(LTruncateDToInt32* ins)
      : ins_(ins)
    { }

    void accept(CodeGeneratorX86* codegen) {
        codegen->visitOutOfLineTruncate(this);
    }
    LTruncateDToInt32* ins() const {
        return ins_;
    }
};

class OutOfLineTruncateFloat32 : public OutOfLineCodeBase<CodeGeneratorX86>
{
    LTruncateFToInt32* ins_;

  public:
    explicit OutOfLineTruncateFloat32(LTruncateFToInt32* ins)
      : ins_(ins)
    { }

    void accept(CodeGeneratorX86* codegen) {
        codegen->visitOutOfLineTruncateFloat32(this);
    }
    LTruncateFToInt32* ins() const {
        return ins_;
    }
};

} // namespace jit
} // namespace js

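// The inline path uses the SSE truncating conversions (cvttsd2si/cvttss2si),
// which produce the 0x80000000 sentinel for NaN and out-of-range inputs;
// those cases branch to the out-of-line code below.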
void
CodeGeneratorX86::visitTruncateDToInt32(LTruncateDToInt32* ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    OutOfLineTruncate* ool = new(alloc()) OutOfLineTruncate(ins);
    addOutOfLineCode(ool, ins->mir());

    masm.branchTruncateDouble(input, output, ool->entry());
    masm.bind(ool->rejoin());
}

void
CodeGeneratorX86::visitTruncateFToInt32(LTruncateFToInt32* ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    OutOfLineTruncateFloat32* ool = new(alloc()) OutOfLineTruncateFloat32(ins);
    addOutOfLineCode(ool, ins->mir());

    masm.branchTruncateFloat32(input, output, ool->entry());
    masm.bind(ool->rejoin());
}

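// Out-of-line truncation. With SSE3 we can use fisttp, which truncates
// regardless of the FPU rounding mode; otherwise we bias the input by
// +/- 2^32 and retry the exact SSE2 conversion. If both strategies fail,
// fall back to a call to the ToInt32 VM function.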
void
CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate* ool)
{
    LTruncateDToInt32* ins = ool->ins();
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    Label fail;

    if (Assembler::HasSSE3()) {
        // Push double.
        masm.subl(Imm32(sizeof(double)), esp);
        masm.storeDouble(input, Operand(esp, 0));

        static const uint32_t EXPONENT_MASK = 0x7ff00000;
        static const uint32_t EXPONENT_SHIFT = FloatingPoint<double>::kExponentShift - 32;
        static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<double>::kExponentBias + 63)
                                                 << EXPONENT_SHIFT;

        // Check exponent to avoid fp exceptions.
        Label failPopDouble;
        masm.load32(Address(esp, 4), output);
        masm.and32(Imm32(EXPONENT_MASK), output);
        masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(TOO_BIG_EXPONENT), &failPopDouble);

        // Load double, perform 64-bit truncation.
        masm.fld(Operand(esp, 0));
        masm.fisttp(Operand(esp, 0));

        // Load low word, pop double and jump back.
        masm.load32(Address(esp, 0), output);
        masm.addl(Imm32(sizeof(double)), esp);
        masm.jump(ool->rejoin());

        masm.bind(&failPopDouble);
        masm.addl(Imm32(sizeof(double)), esp);
        masm.jump(&fail);
    } else {
        FloatRegister temp = ToFloatRegister(ins->tempFloat());

        // Try to convert doubles representing integers within 2^32 of a signed
        // integer, by adding/subtracting 2^32 and then trying to convert to int32.
        // This has to be an exact conversion, as otherwise the truncation works
        // incorrectly on the modified value.
        masm.zeroDouble(ScratchDoubleReg);
        masm.vucomisd(ScratchDoubleReg, input);
        masm.j(Assembler::Parity, &fail);

        {
            Label positive;
            masm.j(Assembler::Above, &positive);

            masm.loadConstantDouble(4294967296.0, temp);
            Label skip;
            masm.jmp(&skip);

            masm.bind(&positive);
            masm.loadConstantDouble(-4294967296.0, temp);
            masm.bind(&skip);
        }

        masm.addDouble(input, temp);
        masm.vcvttsd2si(temp, output);
        masm.vcvtsi2sd(output, ScratchDoubleReg, ScratchDoubleReg);

        masm.vucomisd(ScratchDoubleReg, temp);
        masm.j(Assembler::Parity, &fail);
        masm.j(Assembler::Equal, ool->rejoin());
    }

    masm.bind(&fail);
    {
        saveVolatile(output);

        masm.setupUnalignedABICall(output);
        masm.passABIArg(input, MoveOp::DOUBLE);
        if (gen->compilingAsmJS())
            masm.callWithABI(wasm::SymbolicAddress::ToInt32);
        else
            masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
        masm.storeCallResult(output);

        restoreVolatile(output);
    }

    masm.jump(ool->rejoin());
}

void
CodeGeneratorX86::visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32* ool)
{
    LTruncateFToInt32* ins = ool->ins();
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    Label fail;

    if (Assembler::HasSSE3()) {
        // Push the float32, but reserve 64 bits of stack so that the value
        // stored by fisttp fits.
        masm.subl(Imm32(sizeof(uint64_t)), esp);
        masm.storeFloat32(input, Operand(esp, 0));

        static const uint32_t EXPONENT_MASK = FloatingPoint<float>::kExponentBits;
        static const uint32_t EXPONENT_SHIFT = FloatingPoint<float>::kExponentShift;
        // Integers are still 64 bits long, so we can still test for an exponent > 63.
        static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<float>::kExponentBias + 63)
                                                 << EXPONENT_SHIFT;

        // Check exponent to avoid fp exceptions.
        Label failPopFloat;
        masm.movl(Operand(esp, 0), output);
        masm.and32(Imm32(EXPONENT_MASK), output);
        masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(TOO_BIG_EXPONENT), &failPopFloat);

        // Load float, perform 64-bit truncation.
        masm.fld32(Operand(esp, 0));
        masm.fisttp(Operand(esp, 0));

        // Load low word, pop the 64 bits and jump back.
        masm.load32(Address(esp, 0), output);
        masm.addl(Imm32(sizeof(uint64_t)), esp);
        masm.jump(ool->rejoin());

        masm.bind(&failPopFloat);
        masm.addl(Imm32(sizeof(uint64_t)), esp);
        masm.jump(&fail);
    } else {
        FloatRegister temp = ToFloatRegister(ins->tempFloat());

        // Try to convert float32 values representing integers within 2^32 of a
        // signed integer, by adding/subtracting 2^32 and then trying to convert
        // to int32. This has to be an exact conversion, as otherwise the
        // truncation works incorrectly on the modified value.
        masm.zeroFloat32(ScratchFloat32Reg);
        masm.vucomiss(ScratchFloat32Reg, input);
        masm.j(Assembler::Parity, &fail);

        {
            Label positive;
            masm.j(Assembler::Above, &positive);

            masm.loadConstantFloat32(4294967296.f, temp);
            Label skip;
            masm.jmp(&skip);

            masm.bind(&positive);
            masm.loadConstantFloat32(-4294967296.f, temp);
            masm.bind(&skip);
        }

        masm.addFloat32(input, temp);
        masm.vcvttss2si(temp, output);
        masm.vcvtsi2ss(output, ScratchFloat32Reg, ScratchFloat32Reg);

        masm.vucomiss(ScratchFloat32Reg, temp);
        masm.j(Assembler::Parity, &fail);
        masm.j(Assembler::Equal, ool->rejoin());
    }

    masm.bind(&fail);
    {
        saveVolatile(output);

        masm.push(input);
        masm.setupUnalignedABICall(output);
        masm.vcvtss2sd(input, input, input);
        masm.passABIArg(input.asDouble(), MoveOp::DOUBLE);

        if (gen->compilingAsmJS())
            masm.callWithABI(wasm::SymbolicAddress::ToInt32);
        else
            masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));

        masm.storeCallResult(output);
        masm.pop(input);

        restoreVolatile(output);
    }

    masm.jump(ool->rejoin());
}