/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x86/CodeGenerator-x86.h"

#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"

#include <iterator>

#include "jsnum.h"

#include "jit/CodeGenerator.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/Shape.h"
#include "wasm/WasmTypes.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "vm/JSScript-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;
using mozilla::BitwiseCast;
using mozilla::DebugOnly;
using mozilla::FloatingPoint;

CodeGeneratorX86::CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph,
                                   MacroAssembler* masm)
    : CodeGeneratorX86Shared(gen, graph, masm) {}

static const uint32_t FrameSizes[] = {128, 256, 512, 1024};

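// Map a frame depth to the smallest size class whose frame can hold it;
// depths at or beyond the largest class get no size class at all.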
FrameSizeClass FrameSizeClass::FromDepth(uint32_t frameDepth) {
  for (uint32_t i = 0; i < std::size(FrameSizes); i++) {
    if (frameDepth < FrameSizes[i]) {
      return FrameSizeClass(i);
    }
  }

  return FrameSizeClass::None();
}

FrameSizeClass FrameSizeClass::ClassLimit() {
  return FrameSizeClass(std::size(FrameSizes));
}

uint32_t FrameSizeClass::frameSize() const {
  MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
  MOZ_ASSERT(class_ < std::size(FrameSizes));

  return FrameSizes[class_];
}

ValueOperand CodeGeneratorX86::ToValue(LInstruction* ins, size_t pos) {
  Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
  Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
  return ValueOperand(typeReg, payloadReg);
}

ValueOperand CodeGeneratorX86::ToTempValue(LInstruction* ins, size_t pos) {
  Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
  Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
  return ValueOperand(typeReg, payloadReg);
}

void CodeGenerator::visitValue(LValue* value) {
  const ValueOperand out = ToOutValue(value);
  masm.moveValue(value->value(), out);
}

void CodeGenerator::visitBox(LBox* box) {
  const LDefinition* type = box->getDef(TYPE_INDEX);

  DebugOnly<const LAllocation*> a = box->getOperand(0);
  MOZ_ASSERT(!a->isConstant());

  // On x86, the input operand and the output payload have the same
  // virtual register. All that needs to be written is the type tag for
  // the type definition.
  masm.mov(ImmWord(MIRTypeToTag(box->type())), ToRegister(type));
}

void CodeGenerator::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
  const AnyRegister in = ToAnyRegister(box->getOperand(0));
  const ValueOperand out = ToOutValue(box);

  masm.moveValue(TypedOrValueRegister(box->type(), in), out);

  if (JitOptions.spectreValueMasking) {
    Register scratch = ToRegister(box->spectreTemp());
    masm.move32(Imm32(JSVAL_TAG_CLEAR), scratch);
    masm.cmp32Move32(Assembler::Below, scratch, out.typeReg(), scratch,
                     out.typeReg());
  }
}

void CodeGenerator::visitUnbox(LUnbox* unbox) {
  // Note that for unbox, the type and payload indexes are switched on the
  // inputs.
  Operand type = ToOperand(unbox->type());
  Operand payload = ToOperand(unbox->payload());
  Register output = ToRegister(unbox->output());
  MUnbox* mir = unbox->mir();

  JSValueTag tag = MIRTypeToTag(mir->type());
  if (mir->fallible()) {
    masm.cmp32(type, Imm32(tag));
    bailoutIf(Assembler::NotEqual, unbox->snapshot());
  } else {
#ifdef DEBUG
    Label ok;
    masm.branch32(Assembler::Equal, type, Imm32(tag), &ok);
    masm.assumeUnreachable("Infallible unbox type mismatch");
    masm.bind(&ok);
#endif
  }

  // Note: If spectreValueMasking is disabled, then this instruction defaults
  // to a no-op as long as lowering allocates the same register for the output
  // and the payload.
  masm.unboxNonDouble(type, payload, output, ValueTypeFromMIRType(mir->type()));
}

void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp = ToRegister(lir->temp());
  Register64 temp64 = ToRegister64(lir->temp64());
  Register out = ToRegister(lir->output());

  MOZ_ASSERT(out == ecx);
  MOZ_ASSERT(temp == ebx);
  MOZ_ASSERT(temp64 == Register64(edx, eax));
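  // These fixed register pairs match lock cmpxchg8b, which the masm uses to
  // implement the 64-bit atomic load: edx:eax holds the expected value and
  // receives the loaded value, while ecx:ebx holds the replacement.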

  const MLoadUnboxedScalar* mir = lir->mir();

  Scalar::Type storageType = mir->storageType();

  if (lir->index()->isConstant()) {
    Address source =
        ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
    masm.atomicLoad64(Synchronization::Load(), source, Register64(ecx, ebx),
                      Register64(edx, eax));
  } else {
    BaseIndex source(elements, ToRegister(lir->index()),
                     ScaleFromScalarType(storageType), mir->offsetAdjustment());
    masm.atomicLoad64(Synchronization::Load(), source, Register64(ecx, ebx),
                      Register64(edx, eax));
  }

  emitCreateBigInt(lir, storageType, temp64, out, temp);
}

void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register64 temp2 = Register64(value, ToRegister(lir->tempLow()));

  MOZ_ASSERT(temp1 == Register64(ecx, ebx));
  MOZ_ASSERT(temp2 == Register64(edx, eax));

  Scalar::Type writeType = lir->mir()->writeType();

  masm.loadBigInt64(value, temp1);

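  // temp2 aliases |value| (edx), and the atomic store below clobbers it, so
  // preserve the BigInt pointer across the operation.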
  masm.push(value);
  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), writeType);
    masm.atomicStore64(Synchronization::Store(), dest, temp1, temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(writeType));
    masm.atomicStore64(Synchronization::Store(), dest, temp1, temp2);
  }
  masm.pop(value);
}

void CodeGenerator::visitCompareExchangeTypedArrayElement64(
    LCompareExchangeTypedArrayElement64* lir) {
  Register elements = ToRegister(lir->elements());
  Register oldval = ToRegister(lir->oldval());
  DebugOnly<Register> newval = ToRegister(lir->newval());
  DebugOnly<Register> temp = ToRegister(lir->tempLow());
  Register out = ToRegister(lir->output());

  MOZ_ASSERT(elements == esi);
  MOZ_ASSERT(oldval == eax);
  MOZ_ASSERT(newval.inspect() == edx);
  MOZ_ASSERT(temp.inspect() == ebx);
  MOZ_ASSERT(out == ecx);

  Scalar::Type arrayType = lir->mir()->arrayType();

  DebugOnly<uint32_t> framePushed = masm.framePushed();

  // Save eax and edx before they're clobbered below.
  masm.push(eax);
  masm.push(edx);

  auto restoreSavedRegisters = [&]() {
    masm.pop(edx);
    masm.pop(eax);
  };

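  // lock cmpxchg8b hardwires its operands: edx:eax holds the expected value
  // and receives the old value from memory, ecx:ebx holds the replacement.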
  Register64 expected = Register64(edx, eax);
  Register64 replacement = Register64(ecx, ebx);

  // Load |oldval| and |newval| into |expected| and |replacement|,
  // respectively.
  {
    // Use `esi` as a temp register.
    Register bigInt = esi;
    masm.push(bigInt);

    masm.mov(oldval, bigInt);
    masm.loadBigInt64(bigInt, expected);

    // |newval| is stored in `edx`, which is already pushed onto the stack.
    masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), bigInt);
    masm.loadBigInt64(bigInt, replacement);

    masm.pop(bigInt);
  }

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.compareExchange64(Synchronization::Full(), dest, expected, replacement,
                           expected);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.compareExchange64(Synchronization::Full(), dest, expected, replacement,
                           expected);
  }

  // Move the result from `edx:eax` to `ecx:ebx`.
  masm.move64(expected, replacement);

  // OutOfLineCallVM tracks the currently pushed stack entries as reported by
  // |masm.framePushed()|. We mustn't have any additional entries on the stack
  // which weren't previously recorded by the safepoint, otherwise the GC
  // complains when tracing the Ion frames, because the stack frames don't
  // have their expected layout.
  MOZ_ASSERT(framePushed == masm.framePushed());

  OutOfLineCode* ool = createBigIntOutOfLine(lir, arrayType, replacement, out);

  // Use `edx:eax`, which are both already on the stack, as temp registers.
  Register bigInt = eax;
  Register temp2 = edx;

  Label fail;
  masm.newGCBigInt(bigInt, temp2, &fail, bigIntsCanBeInNursery());
  masm.initializeBigInt64(arrayType, bigInt, replacement);
  masm.mov(bigInt, out);
  restoreSavedRegisters();
  masm.jump(ool->rejoin());

  // Couldn't create the BigInt. Restore `edx:eax` and call into the VM.
  masm.bind(&fail);
  restoreSavedRegisters();
  masm.jump(ool->entry());

  // At this point `edx:eax` must have been restored to their original values.
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
    LAtomicExchangeTypedArrayElement64* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register out = ToRegister(lir->output());
  Register64 temp2 = Register64(value, out);

  MOZ_ASSERT(value == edx);
  MOZ_ASSERT(temp1 == Register64(ecx, ebx));
  MOZ_ASSERT(temp2 == Register64(edx, eax));
  MOZ_ASSERT(out == eax);

  Scalar::Type arrayType = lir->mir()->arrayType();

  DebugOnly<uint32_t> framePushed = masm.framePushed();

  // Save edx before it's clobbered below.
  masm.push(edx);

  auto restoreSavedRegisters = [&]() { masm.pop(edx); };

  masm.loadBigInt64(value, temp1);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
  }

  // Move the result from `edx:eax` to `ecx:ebx`.
  masm.move64(temp2, temp1);

  // OutOfLineCallVM tracks the currently pushed stack entries as reported by
  // |masm.framePushed()|. We mustn't have any additional entries on the stack
  // which weren't previously recorded by the safepoint, otherwise the GC
  // complains when tracing the Ion frames, because the stack frames don't
  // have their expected layout.
  MOZ_ASSERT(framePushed == masm.framePushed());

  OutOfLineCode* ool = createBigIntOutOfLine(lir, arrayType, temp1, out);

  // Use `edx`, which is already on the stack, as a temp register.
  Register temp = edx;

  Label fail;
  masm.newGCBigInt(out, temp, &fail, bigIntsCanBeInNursery());
  masm.initializeBigInt64(arrayType, out, temp1);
  restoreSavedRegisters();
  masm.jump(ool->rejoin());

  // Couldn't create the BigInt. Restore `edx` and call into the VM.
  masm.bind(&fail);
  restoreSavedRegisters();
  masm.jump(ool->entry());

  // At this point `edx` must have been restored to its original value.
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitAtomicTypedArrayElementBinop64(
    LAtomicTypedArrayElementBinop64* lir) {
  MOZ_ASSERT(!lir->mir()->isForEffect());

  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register out = ToRegister(lir->output());
  Register64 temp2 = Register64(value, out);

  MOZ_ASSERT(value == edx);
  MOZ_ASSERT(temp1 == Register64(ecx, ebx));
  MOZ_ASSERT(temp2 == Register64(edx, eax));
  MOZ_ASSERT(out == eax);

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  DebugOnly<uint32_t> framePushed = masm.framePushed();

  // Save edx before it's clobbered below.
  masm.push(edx);

  auto restoreSavedRegisters = [&]() { masm.pop(edx); };

  masm.loadBigInt64(value, temp1);

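  // Pass the 64-bit operand on the stack: the cmpxchg8b loop underlying
  // atomicFetchOp64 occupies all of eax/ebx/ecx/edx, leaving no spare
  // register pair to hold it.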
  masm.Push(temp1);

  Address addr(masm.getStackPointer(), 0);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
                         temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
                         temp2);
  }

  masm.freeStack(sizeof(uint64_t));

  // Move the result from `edx:eax` to `ecx:ebx`.
  masm.move64(temp2, temp1);

  // OutOfLineCallVM tracks the currently pushed stack entries as reported by
  // |masm.framePushed()|. We mustn't have any additional entries on the stack
  // which weren't previously recorded by the safepoint, otherwise the GC
  // complains when tracing the Ion frames, because the stack frames don't
  // have their expected layout.
  MOZ_ASSERT(framePushed == masm.framePushed());

  OutOfLineCode* ool = createBigIntOutOfLine(lir, arrayType, temp1, out);

  // Use `edx`, which is already on the stack, as a temp register.
  Register temp = edx;

  Label fail;
  masm.newGCBigInt(out, temp, &fail, bigIntsCanBeInNursery());
  masm.initializeBigInt64(arrayType, out, temp1);
  restoreSavedRegisters();
  masm.jump(ool->rejoin());

  // Couldn't create the BigInt. Restore `edx` and call into the VM.
  masm.bind(&fail);
  restoreSavedRegisters();
  masm.jump(ool->entry());

  // At this point `edx` must have been restored to its original value.
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
    LAtomicTypedArrayElementBinopForEffect64* lir) {
  MOZ_ASSERT(lir->mir()->isForEffect());

  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register64 temp1 = ToRegister64(lir->temp1());
  Register tempLow = ToRegister(lir->tempLow());
  Register64 temp2 = Register64(value, tempLow);

  MOZ_ASSERT(value == edx);
  MOZ_ASSERT(temp1 == Register64(ecx, ebx));
  MOZ_ASSERT(temp2 == Register64(edx, eax));
  MOZ_ASSERT(tempLow == eax);

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  // Save edx before it's clobbered below.
  masm.push(edx);

  masm.loadBigInt64(value, temp1);

  masm.Push(temp1);

  Address addr(masm.getStackPointer(), 0);

  if (lir->index()->isConstant()) {
    Address dest = ToAddress(elements, lir->index(), arrayType);
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
                         temp2);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromScalarType(arrayType));
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
                         temp2);
  }

  masm.freeStack(sizeof(uint64_t));

  masm.pop(edx);
}

// See ../CodeGenerator.cpp for more information.
void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {}

void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  Register input = ToRegister(lir->input());
  Register temp = ToRegister(lir->temp());

  if (input != temp) {
    masm.mov(input, temp);
  }

  // Beware: convertUInt32ToDouble clobbers input.
  masm.convertUInt32ToDouble(temp, ToFloatRegister(lir->output()));
}

void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
  Register input = ToRegister(lir->input());
  Register temp = ToRegister(lir->temp());
  FloatRegister output = ToFloatRegister(lir->output());

  if (input != temp) {
    masm.mov(input, temp);
  }

  // Beware: convertUInt32ToFloat32 clobbers input.
  masm.convertUInt32ToFloat32(temp, output);
}

void CodeGenerator::visitWasmHeapBase(LWasmHeapBase* ins) {
  masm.loadPtr(
      Address(ToRegister(ins->tlsPtr()), offsetof(wasm::TlsData, memoryBase)),
      ToRegister(ins->output()));
}

template <typename T>
void CodeGeneratorX86::emitWasmLoad(T* ins) {
  const MWasmLoad* mir = ins->mir();

  uint32_t offset = mir->access().offset();
  MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());

  const LAllocation* ptr = ins->ptr();
  const LAllocation* memoryBase = ins->memoryBase();

  // Lowering has set things up so that we can use a BaseIndex form if the
  // pointer is constant and the offset is zero, or if the pointer is zero.

  Operand srcAddr =
      ptr->isBogus()
          ? Operand(ToRegister(memoryBase),
                    offset ? offset : mir->base()->toConstant()->toInt32())
          : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  if (mir->type() == MIRType::Int64) {
    MOZ_ASSERT_IF(mir->access().isAtomic(),
                  mir->access().type() != Scalar::Int64);
    masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
  } else {
    masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
  }
}

void CodeGenerator::visitWasmLoad(LWasmLoad* ins) { emitWasmLoad(ins); }

void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* ins) { emitWasmLoad(ins); }

template <typename T>
void CodeGeneratorX86::emitWasmStore(T* ins) {
  const MWasmStore* mir = ins->mir();

  uint32_t offset = mir->access().offset();
  MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());

  const LAllocation* ptr = ins->ptr();
  const LAllocation* memoryBase = ins->memoryBase();

  // Lowering has set things up so that we can use a BaseIndex form if the
  // pointer is constant and the offset is zero, or if the pointer is zero.

  Operand dstAddr =
      ptr->isBogus()
          ? Operand(ToRegister(memoryBase),
                    offset ? offset : mir->base()->toConstant()->toInt32())
          : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  if (mir->access().type() == Scalar::Int64) {
    Register64 value =
        ToRegister64(ins->getInt64Operand(LWasmStoreI64::ValueIndex));
    masm.wasmStoreI64(mir->access(), value, dstAddr);
  } else {
    AnyRegister value = ToAnyRegister(ins->getOperand(LWasmStore::ValueIndex));
    masm.wasmStore(mir->access(), value, dstAddr);
  }
}

void CodeGenerator::visitWasmStore(LWasmStore* ins) { emitWasmStore(ins); }

void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* ins) {
  emitWasmStore(ins);
}

void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MWasmCompareExchangeHeap* mir = ins->mir();

  Register ptrReg = ToRegister(ins->ptr());
  Register oldval = ToRegister(ins->oldValue());
  Register newval = ToRegister(ins->newValue());
  Register addrTemp = ToRegister(ins->addrTemp());
  Register memoryBase = ToRegister(ins->memoryBase());
  Register output = ToRegister(ins->output());

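  // Fold memoryBase + ptr + offset into a single flat address so the atomic
  // operation below can use a simple [addrTemp] memory operand.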
  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  masm.wasmCompareExchange(mir->access(), memAddr, oldval, newval, output);
}

void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
  MWasmAtomicExchangeHeap* mir = ins->mir();

  Register ptrReg = ToRegister(ins->ptr());
  Register value = ToRegister(ins->value());
  Register addrTemp = ToRegister(ins->addrTemp());
  Register memoryBase = ToRegister(ins->memoryBase());
  Register output = ToRegister(ins->output());

  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  masm.wasmAtomicExchange(mir->access(), memAddr, value, output);
}

void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();

  Register ptrReg = ToRegister(ins->ptr());
  Register temp =
      ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
  Register addrTemp = ToRegister(ins->addrTemp());
  Register out = ToRegister(ins->output());
  const LAllocation* value = ins->value();
  AtomicOp op = mir->operation();
  Register memoryBase = ToRegister(ins->memoryBase());

  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  if (value->isConstant()) {
    masm.wasmAtomicFetchOp(mir->access(), op, Imm32(ToInt32(value)), memAddr,
                           temp, out);
  } else {
    masm.wasmAtomicFetchOp(mir->access(), op, ToRegister(value), memAddr, temp,
                           out);
  }
}

void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasUses());

  Register ptrReg = ToRegister(ins->ptr());
  Register addrTemp = ToRegister(ins->addrTemp());
  const LAllocation* value = ins->value();
  AtomicOp op = mir->operation();
  Register memoryBase = ToRegister(ins->memoryBase());

  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  if (value->isConstant()) {
    masm.wasmAtomicEffectOp(mir->access(), op, Imm32(ToInt32(value)), memAddr,
                            InvalidReg);
  } else {
    masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), memAddr,
                            InvalidReg);
  }
}

void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* ins) {
  uint32_t offset = ins->mir()->access().offset();
  MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  MOZ_ASSERT(ToRegister(ins->t1()) == ecx);
  MOZ_ASSERT(ToRegister(ins->t2()) == ebx);
  MOZ_ASSERT(ToOutRegister64(ins).high == edx);
  MOZ_ASSERT(ToOutRegister64(ins).low == eax);

  masm.wasmAtomicLoad64(ins->mir()->access(), srcAddr, Register64(ecx, ebx),
                        Register64(edx, eax));
}

void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* ins) {
  uint32_t offset = ins->mir()->access().offset();
  MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  MOZ_ASSERT(ToRegister64(ins->expected()).low == eax);
  MOZ_ASSERT(ToRegister64(ins->expected()).high == edx);
  MOZ_ASSERT(ToRegister64(ins->replacement()).low == ebx);
  MOZ_ASSERT(ToRegister64(ins->replacement()).high == ecx);
  MOZ_ASSERT(ToOutRegister64(ins).low == eax);
  MOZ_ASSERT(ToOutRegister64(ins).high == edx);

  masm.append(ins->mir()->access(), masm.size());
  masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
}

template <typename T>
void CodeGeneratorX86::emitWasmStoreOrExchangeAtomicI64(
    T* ins, const wasm::MemoryAccessDesc& access) {
  MOZ_ASSERT(access.offset() < masm.wasmMaxOffsetGuardLimit());

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne,
                  access.offset());

  DebugOnly<const LInt64Allocation> value = ins->value();
  MOZ_ASSERT(ToRegister64(value).low == ebx);
  MOZ_ASSERT(ToRegister64(value).high == ecx);

  // eax and edx will be overwritten every time through the loop but
  // memoryBase and ptr must remain live for a possible second iteration.

  MOZ_ASSERT(ToRegister(memoryBase) != edx && ToRegister(memoryBase) != eax);
  MOZ_ASSERT(ToRegister(ptr) != edx && ToRegister(ptr) != eax);

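  // x86-32 has no plain 64-bit store or exchange, so emulate one with a
  // cmpxchg8b loop: on failure the instruction reloads edx:eax from memory,
  // and we retry until the swap succeeds.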
  Label again;
  masm.bind(&again);
  masm.append(access, masm.size());
  masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
  masm.j(Assembler::Condition::NonZero, &again);
}

void CodeGenerator::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* ins) {
  MOZ_ASSERT(ToRegister(ins->t1()) == edx);
  MOZ_ASSERT(ToRegister(ins->t2()) == eax);

  emitWasmStoreOrExchangeAtomicI64(ins, ins->mir()->access());
}

void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* ins) {
  MOZ_ASSERT(ToOutRegister64(ins).high == edx);
  MOZ_ASSERT(ToOutRegister64(ins).low == eax);

  emitWasmStoreOrExchangeAtomicI64(ins, ins->access());
}

void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* ins) {
  uint32_t offset = ins->access().offset();
  MOZ_ASSERT(offset < masm.wasmMaxOffsetGuardLimit());

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();

  BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  MOZ_ASSERT(ToRegister(memoryBase) == esi || ToRegister(memoryBase) == edi);
  MOZ_ASSERT(ToRegister(ptr) == esi || ToRegister(ptr) == edi);

  Register64 value = ToRegister64(ins->value());

  MOZ_ASSERT(value.low == ebx);
  MOZ_ASSERT(value.high == ecx);

  Register64 output = ToOutRegister64(ins);

  MOZ_ASSERT(output.low == eax);
  MOZ_ASSERT(output.high == edx);

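  // Spill the operand to the stack: wasmAtomicFetchOp64 reloads it from
  // |valueAddr| on every iteration of its cmpxchg8b loop, freeing ecx:ebx to
  // hold the computed replacement.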
  masm.Push(ecx);
  masm.Push(ebx);

  Address valueAddr(esp, 0);

  // Here the `value` register acts as a temp, we'll restore it below.
  masm.wasmAtomicFetchOp64(ins->access(), ins->operation(), valueAddr, srcAddr,
                           value, output);

  masm.Pop(ebx);
  masm.Pop(ecx);
}

namespace js {
namespace jit {

class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86> {
  LInstruction* ins_;

 public:
  explicit OutOfLineTruncate(LInstruction* ins) : ins_(ins) {
    MOZ_ASSERT(ins_->isTruncateDToInt32() ||
               ins_->isWasmBuiltinTruncateDToInt32());
  }

  void accept(CodeGeneratorX86* codegen) override {
    codegen->visitOutOfLineTruncate(this);
  }

  LAllocation* input() { return ins_->getOperand(0); }
  LDefinition* output() { return ins_->getDef(0); }
  LDefinition* tempFloat() { return ins_->getTemp(0); }

  wasm::BytecodeOffset bytecodeOffset() const {
    if (ins_->isTruncateDToInt32()) {
      return ins_->toTruncateDToInt32()->mir()->bytecodeOffset();
    }

    return ins_->toWasmBuiltinTruncateDToInt32()->mir()->bytecodeOffset();
  }
};

class OutOfLineTruncateFloat32 : public OutOfLineCodeBase<CodeGeneratorX86> {
  LInstruction* ins_;

 public:
  explicit OutOfLineTruncateFloat32(LInstruction* ins) : ins_(ins) {
    MOZ_ASSERT(ins_->isTruncateFToInt32() ||
               ins_->isWasmBuiltinTruncateFToInt32());
  }

  void accept(CodeGeneratorX86* codegen) override {
    codegen->visitOutOfLineTruncateFloat32(this);
  }

  LAllocation* input() { return ins_->getOperand(0); }
  LDefinition* output() { return ins_->getDef(0); }
  LDefinition* tempFloat() { return ins_->getTemp(0); }

  wasm::BytecodeOffset bytecodeOffset() const {
    if (ins_->isTruncateFToInt32()) {
      return ins_->toTruncateFToInt32()->mir()->bytecodeOffset();
    }

    return ins_->toWasmBuiltinTruncateFToInt32()->mir()->bytecodeOffset();
  }
};

} // namespace jit
} // namespace js

void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  OutOfLineTruncate* ool = new (alloc()) OutOfLineTruncate(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
    LWasmBuiltinTruncateDToInt32* lir) {
  FloatRegister input = ToFloatRegister(lir->getOperand(0));
  Register output = ToRegister(lir->getDef(0));

  OutOfLineTruncate* ool = new (alloc()) OutOfLineTruncate(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  OutOfLineTruncateFloat32* ool = new (alloc()) OutOfLineTruncateFloat32(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
    LWasmBuiltinTruncateFToInt32* lir) {
  FloatRegister input = ToFloatRegister(lir->getOperand(0));
  Register output = ToRegister(lir->getDef(0));

  OutOfLineTruncateFloat32* ool = new (alloc()) OutOfLineTruncateFloat32(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate* ool) {
  FloatRegister input = ToFloatRegister(ool->input());
  Register output = ToRegister(ool->output());

  Label fail;

  if (Assembler::HasSSE3()) {
    Label failPopDouble;
    // Push double.
    masm.subl(Imm32(sizeof(double)), esp);
    masm.storeDouble(input, Operand(esp, 0));

    // Check exponent to avoid fp exceptions.
    masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopDouble);

    // Load double, perform 64-bit truncation.
    masm.truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), output);

    // Load low word, pop double and jump back.
    masm.load32(Address(esp, 0), output);
    masm.addl(Imm32(sizeof(double)), esp);
    masm.jump(ool->rejoin());

    masm.bind(&failPopDouble);
    masm.addl(Imm32(sizeof(double)), esp);
    masm.jump(&fail);
  } else {
    FloatRegister temp = ToFloatRegister(ool->tempFloat());

    // Try to convert doubles representing integers within 2^32 of a signed
    // integer, by adding/subtracting 2^32 and then trying to convert to int32.
    // This has to be an exact conversion, as otherwise the truncation works
    // incorrectly on the modified value.
    {
      ScratchDoubleScope fpscratch(masm);
      masm.zeroDouble(fpscratch);
      masm.vucomisd(fpscratch, input);
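      // ucomisd sets the parity flag on an unordered comparison, i.e. when
      // the input is NaN.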
      masm.j(Assembler::Parity, &fail);
    }

    {
      Label positive;
      masm.j(Assembler::Above, &positive);

      masm.loadConstantDouble(4294967296.0, temp);
      Label skip;
      masm.jmp(&skip);

      masm.bind(&positive);
      masm.loadConstantDouble(-4294967296.0, temp);
      masm.bind(&skip);
    }

    masm.addDouble(input, temp);
    masm.vcvttsd2si(temp, output);
    ScratchDoubleScope fpscratch(masm);
    masm.vcvtsi2sd(output, fpscratch, fpscratch);

    masm.vucomisd(fpscratch, temp);
    masm.j(Assembler::Parity, &fail);
    masm.j(Assembler::Equal, ool->rejoin());
  }

  masm.bind(&fail);
  {
    if (gen->compilingWasm()) {
      masm.Push(WasmTlsReg);
    }
    int32_t framePushedAfterTls = masm.framePushed();

    saveVolatile(output);

    if (gen->compilingWasm()) {
      masm.setupWasmABICall();
      masm.passABIArg(input, MoveOp::DOUBLE);

      int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
      masm.callWithABI(ool->bytecodeOffset(), wasm::SymbolicAddress::ToInt32,
                       mozilla::Some(tlsOffset));
    } else {
      using Fn = int32_t (*)(double);
      masm.setupUnalignedABICall(output);
      masm.passABIArg(input, MoveOp::DOUBLE);
      masm.callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
                                        CheckUnsafeCallWithABI::DontCheckOther);
    }
    masm.storeCallInt32Result(output);

    restoreVolatile(output);

    if (gen->compilingWasm()) {
      masm.Pop(WasmTlsReg);
    }
  }

  masm.jump(ool->rejoin());
}

void CodeGeneratorX86::visitOutOfLineTruncateFloat32(
    OutOfLineTruncateFloat32* ool) {
  FloatRegister input = ToFloatRegister(ool->input());
  Register output = ToRegister(ool->output());

  Label fail;

  if (Assembler::HasSSE3()) {
    Label failPopFloat;

    // Push the float32, but reserve 64 bits of stack so that the 64-bit
    // value written back by fisttp fits.
    masm.subl(Imm32(sizeof(uint64_t)), esp);
    masm.storeFloat32(input, Operand(esp, 0));

    // Check exponent to avoid fp exceptions.
    masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopFloat);

    // Load float, perform 64-bit truncation.
    masm.truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), output);

    // Load low word, pop 64 bits and jump back.
    masm.load32(Address(esp, 0), output);
    masm.addl(Imm32(sizeof(uint64_t)), esp);
    masm.jump(ool->rejoin());

    masm.bind(&failPopFloat);
    masm.addl(Imm32(sizeof(uint64_t)), esp);
    masm.jump(&fail);
  } else {
    FloatRegister temp = ToFloatRegister(ool->tempFloat());

    // Try to convert float32 representing integers within 2^32 of a signed
    // integer, by adding/subtracting 2^32 and then trying to convert to int32.
    // This has to be an exact conversion, as otherwise the truncation works
    // incorrectly on the modified value.
    {
      ScratchFloat32Scope fpscratch(masm);
      masm.zeroFloat32(fpscratch);
      masm.vucomiss(fpscratch, input);
      masm.j(Assembler::Parity, &fail);
    }

    {
      Label positive;
      masm.j(Assembler::Above, &positive);

      masm.loadConstantFloat32(4294967296.f, temp);
      Label skip;
      masm.jmp(&skip);

      masm.bind(&positive);
      masm.loadConstantFloat32(-4294967296.f, temp);
      masm.bind(&skip);
    }

    masm.addFloat32(input, temp);
    masm.vcvttss2si(temp, output);
    ScratchFloat32Scope fpscratch(masm);
    masm.vcvtsi2ss(output, fpscratch, fpscratch);

    masm.vucomiss(fpscratch, temp);
    masm.j(Assembler::Parity, &fail);
    masm.j(Assembler::Equal, ool->rejoin());
  }

  masm.bind(&fail);
  {
    if (gen->compilingWasm()) {
      masm.Push(WasmTlsReg);
    }
    int32_t framePushedAfterTls = masm.framePushed();

    saveVolatile(output);

    masm.Push(input);

    if (gen->compilingWasm()) {
      masm.setupWasmABICall();
    } else {
      masm.setupUnalignedABICall(output);
    }

    masm.vcvtss2sd(input, input, input);
    masm.passABIArg(input.asDouble(), MoveOp::DOUBLE);

    if (gen->compilingWasm()) {
      int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
      masm.callWithABI(ool->bytecodeOffset(), wasm::SymbolicAddress::ToInt32,
                       mozilla::Some(tlsOffset));
    } else {
      using Fn = int32_t (*)(double);
      masm.callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
                                        CheckUnsafeCallWithABI::DontCheckOther);
    }

    masm.storeCallInt32Result(output);
    masm.Pop(input);

    restoreVolatile(output);

    if (gen->compilingWasm()) {
      masm.Pop(WasmTlsReg);
    }
  }

  masm.jump(ool->rejoin());
}

void CodeGenerator::visitCompareI64(LCompareI64* lir) {
  MCompare* mir = lir->mir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register64 lhsRegs = ToRegister64(lhs);
  Register output = ToRegister(lir->output());

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
  Label done;

  masm.move32(Imm32(1), output);

  if (IsConstant(rhs)) {
    Imm64 imm = Imm64(ToInt64(rhs));
    masm.branch64(condition, lhsRegs, imm, &done);
  } else {
    Register64 rhsRegs = ToRegister64(rhs);
    masm.branch64(condition, lhsRegs, rhsRegs, &done);
  }

  masm.xorl(output, output);
  masm.bind(&done);
}

void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
  MCompare* mir = lir->cmpMir();
  MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
             mir->compareType() == MCompare::Compare_UInt64);

  const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
  Register64 lhsRegs = ToRegister64(lhs);

  bool isSigned = mir->compareType() == MCompare::Compare_Int64;
  Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);

  Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
  Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());

  if (isNextBlock(lir->ifFalse()->lir())) {
    falseLabel = nullptr;
  } else if (isNextBlock(lir->ifTrue()->lir())) {
    condition = Assembler::InvertCondition(condition);
    trueLabel = falseLabel;
    falseLabel = nullptr;
  }

  if (IsConstant(rhs)) {
    Imm64 imm = Imm64(ToInt64(rhs));
    masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
  } else {
    Register64 rhsRegs = ToRegister64(rhs);
    masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
  }
}

void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Tls)) == WasmTlsReg);

  masm.Push(WasmTlsReg);
  int32_t framePushedAfterTls = masm.framePushed();

  Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
  Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
  Register64 output = ToOutRegister64(lir);

  MOZ_ASSERT(output == ReturnReg64);

  Label done;

  // Handle divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    // We can use WasmTlsReg as temp register because we preserved it before.
    masm.branchTest64(Assembler::NonZero, rhs, rhs, WasmTlsReg, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  MDefinition* mir = lir->mir();

  // Handle an integer overflow exception from INT64_MIN / -1.
  if (lir->canBeNegativeOverflow()) {
    Label notOverflow;
    masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notOverflow);
    masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notOverflow);
    if (mir->isWasmBuiltinModI64()) {
      masm.xor64(output, output);
    } else {
      masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
    }
    masm.jump(&done);
    masm.bind(&notOverflow);
  }

  masm.setupWasmABICall();
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
  if (mir->isWasmBuiltinModI64()) {
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64,
                     mozilla::Some(tlsOffset));
  } else {
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64,
                     mozilla::Some(tlsOffset));
  }

  // output in edx:eax, move to output register.
  masm.movl(edx, output.high);
  MOZ_ASSERT(eax == output.low);

  masm.bind(&done);
  masm.Pop(WasmTlsReg);
}

void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Tls)) == WasmTlsReg);

  masm.Push(WasmTlsReg);
  int32_t framePushedAfterTls = masm.framePushed();

  Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
  Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
  Register64 output = ToOutRegister64(lir);

  MOZ_ASSERT(output == ReturnReg64);

  // Prevent divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    // We can use WasmTlsReg as temp register because we preserved it before.
    masm.branchTest64(Assembler::NonZero, rhs, rhs, WasmTlsReg, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
    masm.bind(&nonZero);
  }

  masm.setupWasmABICall();
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  MDefinition* mir = lir->mir();
  int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
  if (mir->isWasmBuiltinModI64()) {
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64,
                     mozilla::Some(tlsOffset));
  } else {
    masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64,
                     mozilla::Some(tlsOffset));
  }

  // output in edx:eax, move to output register.
  masm.movl(edx, output.high);
  MOZ_ASSERT(eax == output.low);

  masm.Pop(WasmTlsReg);
}

void CodeGeneratorX86::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
                                     Register divisor, Register output,
                                     Label* fail) {
  // Callers handle division by zero and integer overflow.

  MOZ_ASSERT(dividend == eax);
  MOZ_ASSERT(output == edx);

  // Sign extend the lhs into edx to make edx:eax.
  masm.cdq();

  masm.idiv(divisor);

  // Create and return the result.
  masm.newGCBigInt(output, divisor, fail, bigIntsCanBeInNursery());
  masm.initializeBigInt(output, dividend);
}

void CodeGeneratorX86::emitBigIntMod(LBigIntMod* ins, Register dividend,
                                     Register divisor, Register output,
                                     Label* fail) {
  // Callers handle division by zero and integer overflow.

  MOZ_ASSERT(dividend == eax);
  MOZ_ASSERT(output == edx);

  // Sign extend the lhs into edx to make edx:eax.
  masm.cdq();

  masm.idiv(divisor);

  // Move the remainder from edx.
  masm.movl(output, dividend);

  // Create and return the result.
  masm.newGCBigInt(output, divisor, fail, bigIntsCanBeInNursery());
  masm.initializeBigInt(output, dividend);
}

void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

  Register cond = ToRegister(lir->condExpr());
  Register64 falseExpr = ToRegister64(lir->falseExpr());
  Register64 out = ToOutRegister64(lir);

  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
             "true expr is reused for input");

  Label done;
  masm.branchTest32(Assembler::NonZero, cond, cond, &done);
  masm.movl(falseExpr.low, out.low);
  masm.movl(falseExpr.high, out.high);
  masm.bind(&done);
}

void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
  Register64 input = ToRegister64(lir->getInt64Operand(0));

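  // Little-endian: pushing high then low leaves the low word at the lower
  // address, so the two pushes form a contiguous 64-bit value at esp.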
  masm.Push(input.high);
  masm.Push(input.low);
  masm.vmovq(Operand(esp, 0), ToFloatRegister(lir->output()));
  masm.freeStack(sizeof(uint64_t));
}

void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
  Register64 output = ToOutRegister64(lir);

  masm.reserveStack(sizeof(uint64_t));
  masm.vmovq(ToFloatRegister(lir->input()), Operand(esp, 0));
  masm.Pop(output.low);
  masm.Pop(output.high);
}

void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
  Register64 output = ToOutRegister64(lir);
  Register input = ToRegister(lir->input());

  if (lir->mir()->isUnsigned()) {
    if (output.low != input) {
      masm.movl(input, output.low);
    }
    masm.xorl(output.high, output.high);
  } else {
    MOZ_ASSERT(output.low == input);
    MOZ_ASSERT(output.low == eax);
    MOZ_ASSERT(output.high == edx);
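    // cdq sign-extends eax into edx, producing the 64-bit result in edx:eax.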
    masm.cdq();
  }
}

void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
#ifdef DEBUG
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);
  MOZ_ASSERT(input.low == eax);
  MOZ_ASSERT(output.low == eax);
  MOZ_ASSERT(input.high == edx);
  MOZ_ASSERT(output.high == edx);
#endif
  switch (lir->mode()) {
    case MSignExtendInt64::Byte:
      masm.move8SignExtend(eax, eax);
      break;
    case MSignExtendInt64::Half:
      masm.move16SignExtend(eax, eax);
      break;
    case MSignExtendInt64::Word:
      break;
  }
  masm.cdq();
}

void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
  const LInt64Allocation& input = lir->getInt64Operand(0);
  Register output = ToRegister(lir->output());

  if (lir->mir()->bottomHalf()) {
    masm.movl(ToRegister(input.low()), output);
  } else {
    masm.movl(ToRegister(input.high()), output);
  }
}

void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index*) {
  MOZ_CRASH("64-bit only");
}

void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index*) {
  MOZ_CRASH("64-bit only");
}

void CodeGenerator::visitClzI64(LClzI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);

  masm.clz64(input, output.low);
  masm.xorl(output.high, output.high);
}

void CodeGenerator::visitCtzI64(LCtzI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register64 output = ToOutRegister64(lir);

  masm.ctz64(input, output.low);
  masm.xorl(output.high, output.high);
}

void CodeGenerator::visitNotI64(LNotI64* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  Register output = ToRegister(lir->output());

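  // A 64-bit value is zero exactly when the OR of its two halves is zero, so
  // OR the halves into |output| and set the result from the Zero flag.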
  if (input.high == output) {
    masm.orl(input.low, output);
  } else if (input.low == output) {
    masm.orl(input.high, output);
  } else {
    masm.movl(input.high, output);
    masm.orl(input.low, output);
  }

  masm.cmpl(Imm32(0), output);
  masm.emitSet(Assembler::Equal, output);
}

void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  MWasmTruncateToInt64* mir = lir->mir();
  FloatRegister floatTemp = ToFloatRegister(lir->temp());

  Label fail, convert;

  MOZ_ASSERT(mir->input()->type() == MIRType::Double ||
             mir->input()->type() == MIRType::Float32);

  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  bool isSaturating = mir->isSaturating();
  if (mir->input()->type() == MIRType::Float32) {
    if (mir->isUnsigned()) {
      masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating,
                                       ool->entry(), ool->rejoin(), floatTemp);
    } else {
      masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, ool->entry(),
                                      ool->rejoin(), floatTemp);
    }
  } else {
    if (mir->isUnsigned()) {
      masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, ool->entry(),
                                      ool->rejoin(), floatTemp);
    } else {
      masm.wasmTruncateDoubleToInt64(input, output, isSaturating, ool->entry(),
                                     ool->rejoin(), floatTemp);
    }
  }
}

void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));
  FloatRegister output = ToFloatRegister(lir->output());
  Register temp =
      lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());

  MIRType outputType = lir->mir()->type();
  MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);

  if (outputType == MIRType::Double) {
    if (lir->mir()->isUnsigned()) {
      masm.convertUInt64ToDouble(input, output, temp);
    } else {
      masm.convertInt64ToDouble(input, output);
    }
  } else {
    if (lir->mir()->isUnsigned()) {
      masm.convertUInt64ToFloat32(input, output, temp);
    } else {
      masm.convertInt64ToFloat32(input, output);
    }
  }
}

void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
  Register64 input = ToRegister64(lir->getInt64Operand(0));

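  // The value is non-zero if either half is non-zero: branch to ifTrue as
  // soon as the high half tests non-zero, otherwise fall through and test
  // the low half.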
  masm.testl(input.high, input.high);
  jumpToBlock(lir->ifTrue(), Assembler::NonZero);
  masm.testl(input.low, input.low);
  emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
}