/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x86/Lowering-x86.h"

#include "jit/Lowering.h"
#include "jit/MIR.h"
#include "jit/x86/Assembler-x86.h"

#include "jit/shared/Lowering-shared-inl.h"

using namespace js;
using namespace js::jit;

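// On x86 (nunbox32), a boxed Value occupies two 32-bit words: a type tag
// and a payload, each carried by its own virtual register. useBoxFixed pins
// those two halves to a specific pair of physical registers.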
LBoxAllocation LIRGeneratorX86::useBoxFixed(MDefinition* mir, Register reg1,
                                            Register reg2, bool useAtStart) {
  MOZ_ASSERT(mir->type() == MIRType::Value);
  MOZ_ASSERT(reg1 != reg2);

  ensureDefined(mir);
  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
                        LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
}

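// Only eax, ebx, ecx, and edx have byte-addressable low halves (al, bl,
// cl, dl) on x86, so anything that needs a byte register is pinned to eax
// here.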
LAllocation LIRGeneratorX86::useByteOpRegister(MDefinition* mir) {
  return useFixed(mir, eax);
}

LAllocation LIRGeneratorX86::useByteOpRegisterAtStart(MDefinition* mir) {
  return useFixedAtStart(mir, eax);
}

LAllocation LIRGeneratorX86::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useFixed(mir, eax);
}

LDefinition LIRGeneratorX86::tempByteOpRegister() { return tempFixed(eax); }

void LIRGenerator::visitBox(MBox* box) {
  MDefinition* inner = box->getOperand(0);

  // If the box wrapped a double, it needs a new register.
  if (IsFloatingPointType(inner->type())) {
    LDefinition spectreTemp =
        JitOptions.spectreValueMasking ? temp() : LDefinition::BogusTemp();
    defineBox(new (alloc()) LBoxFloatingPoint(useRegisterAtStart(inner),
                                              tempCopy(inner, 0), spectreTemp,
                                              inner->type()),
              box);
    return;
  }

  if (box->canEmitAtUses()) {
    emitAtUses(box);
    return;
  }

  if (inner->isConstant()) {
    defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
    return;
  }

  LBox* lir = new (alloc()) LBox(use(inner), inner->type());

  // Otherwise, we should not define a new register for the payload portion
  // of the output, so bypass defineBox().
  uint32_t vreg = getVirtualRegister();

  // Note that because we're using BogusTemp(), we do not change the type of
  // the definition. We also do not define the first output as "TYPE",
  // because it has no corresponding payload at (vreg + 1). Also note that
  // although we copy the input's original type for the payload half of the
  // definition, this is only for clarity. BogusTemp() definitions are
  // ignored.
  lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
  lir->setDef(1, LDefinition::BogusTemp());
  box->setVirtualRegister(vreg);
  add(lir);
}

void LIRGenerator::visitUnbox(MUnbox* unbox) {
  MDefinition* inner = unbox->getOperand(0);

  // An unbox on x86 reads in a type tag (either in memory or a register) and
  // a payload. Unlike most instructions consuming a box, we ask for the type
  // second, so that the result can re-use the first input.
  MOZ_ASSERT(inner->type() == MIRType::Value);

  ensureDefined(inner);

  if (IsFloatingPointType(unbox->type())) {
    LUnboxFloatingPoint* lir =
        new (alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
    if (unbox->fallible()) {
      assignSnapshot(lir, unbox->bailoutKind());
    }
    define(lir, unbox);
    return;
  }

  // Swap the order we use the box pieces so we can re-use the payload
  // register.
  LUnbox* lir = new (alloc()) LUnbox;
  bool reusePayloadReg = !JitOptions.spectreValueMasking ||
                         unbox->type() == MIRType::Int32 ||
                         unbox->type() == MIRType::Boolean;
  if (reusePayloadReg) {
    lir->setOperand(0, usePayloadInRegisterAtStart(inner));
    lir->setOperand(1, useType(inner, LUse::ANY));
  } else {
    lir->setOperand(0, usePayload(inner, LUse::REGISTER));
    lir->setOperand(1, useType(inner, LUse::ANY));
  }

  if (unbox->fallible()) {
    assignSnapshot(lir, unbox->bailoutKind());
  }

  // Types and payloads form two separate intervals. If the type becomes dead
  // before the payload, it could be used as a Value without the type being
  // recoverable. Unbox's purpose is to eagerly kill the definition of a type
  // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
  // Instead, we create a new virtual register.
  if (reusePayloadReg) {
    defineReuseInput(lir, unbox, 0);
  } else {
    define(lir, unbox);
  }
}

void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
  MOZ_ASSERT(opd->type() == MIRType::Value);

  LReturn* ins = new (alloc()) LReturn(isGenerator);
  ins->setOperand(0, LUse(JSReturnReg_Type));
  ins->setOperand(1, LUse(JSReturnReg_Data));
  fillBoxUses(ins, 0, opd);
  add(ins);
}

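// A Value-typed phi lowers to two LPhis on x86, one for the type tag and
// one for the payload, stored at adjacent indices in the block's phi list.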
void LIRGeneratorX86::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
                                           LBlock* block, size_t lirIndex) {
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
  LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
  type->setOperand(
      inputPosition,
      LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
  payload->setOperand(inputPosition,
                      LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
}

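// Int64 phis likewise split into low and high 32-bit halves, with the two
// halves assigned consecutive virtual registers (asserted below).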
void LIRGeneratorX86::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);

  uint32_t lowVreg = getVirtualRegister();

  phi->setVirtualRegister(lowVreg);

  uint32_t highVreg = getVirtualRegister();
  MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);

  low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
  high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
  annotate(high);
  annotate(low);
}

void LIRGeneratorX86::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                         LBlock* block, size_t lirIndex) {
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
  low->setOperand(inputPosition,
                  LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
  high->setOperand(
      inputPosition,
      LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
}

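// x86 ALU instructions are two-address (the destination doubles as the
// first source), so these lowerings reuse the lhs register for the output.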
void LIRGeneratorX86::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
    MDefinition* input) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
  defineInt64ReuseInput(ins, mir, 0);
}

void LIRGeneratorX86::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}

void LIRGeneratorX86::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                       MDefinition* lhs, MDefinition* rhs) {
  bool needsTemp = true;

  if (rhs->isConstant()) {
    int64_t constant = rhs->toConstant()->toInt64();
    int32_t shift = mozilla::FloorLog2(constant);
    // See special cases in CodeGeneratorX86Shared::visitMulI64.
    if (constant >= -1 && constant <= 2) {
      needsTemp = false;
    }
    if (constant > 0 && int64_t(1) << shift == constant) {
      needsTemp = false;
    }
  }

  // MulI64 on x86 needs its output in edx:eax.
  ins->setInt64Operand(
      0, useInt64Fixed(lhs, Register64(edx, eax), /*useAtStart = */ true));
  ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
  if (needsTemp) {
    ins->setTemp(0, temp());
  }

  defineInt64Fixed(ins, mir,
                   LInt64Allocation(LAllocation(AnyRegister(edx)),
                                    LAllocation(AnyRegister(eax))));
}

void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useFixed(ins->elements(), esi);
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LUse oldval = useFixed(ins->oldval(), eax);
    LUse newval = useFixed(ins->newval(), edx);
    LDefinition temp = tempFixed(ebx);

    auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
        elements, index, oldval, newval, temp);
    defineFixed(lir, ins, LAllocation(AnyRegister(ecx)));
    assignSafepoint(lir, ins);
    return;
  }

  lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ true);
}

void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LAllocation value = useFixed(ins->value(), edx);
    LInt64Definition temp = tempInt64Fixed(Register64(ecx, ebx));

    auto* lir = new (alloc())
        LAtomicExchangeTypedArrayElement64(elements, index, value, temp);
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
    assignSafepoint(lir, ins);
    return;
  }

  lowerAtomicExchangeTypedArrayElement(ins, /*useI386ByteRegisters=*/true);
}

void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LAllocation value = useFixed(ins->value(), edx);
    LInt64Definition temp = tempInt64Fixed(Register64(ecx, ebx));

    // Case 1: the result of the operation is not used.
    //
    // We can omit allocating the result BigInt.

    if (ins->isForEffect()) {
      LDefinition tempLow = tempFixed(eax);

      auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
          elements, index, value, temp, tempLow);
      add(lir, ins);
      return;
    }

    // Case 2: the result of the operation is used.

    auto* lir = new (alloc())
        LAtomicTypedArrayElementBinop64(elements, index, value, temp);
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
    assignSafepoint(lir, ins);
    return;
  }

  lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ true);
}

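// 64-bit atomics on x86-32 go through lock cmpxchg8b, which pins its
// operands to edx:eax (expected value) and ecx:ebx (replacement); the
// fixed temps and outputs below reflect that constraint.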
void LIRGeneratorX86::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->storageType());

  auto* lir = new (alloc()) LAtomicLoad64(elements, index, tempFixed(ebx),
                                          tempInt64Fixed(Register64(edx, eax)));
  defineFixed(lir, ins, LAllocation(AnyRegister(ecx)));
  assignSafepoint(lir, ins);
}

void LIRGeneratorX86::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
  LUse elements = useRegister(ins->elements());
  LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->writeType());
  LAllocation value = useFixed(ins->value(), edx);
  LInt64Definition temp1 = tempInt64Fixed(Register64(ecx, ebx));
  LDefinition temp2 = tempFixed(eax);

  add(new (alloc()) LAtomicStore64(elements, index, value, temp1, temp2), ins);
}

void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToDouble* lir = new (alloc())
      LWasmUint32ToDouble(useRegisterAtStart(ins->input()), temp());
  define(lir, ins);
}

void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
  LWasmUint32ToFloat32* lir = new (alloc())
      LWasmUint32ToFloat32(useRegisterAtStart(ins->input()), temp());
  define(lir, ins);
}

// If the base is a constant, and it is zero or its offset is zero, then
// code generation will fold the values into the access. Allocate the
// pointer to a register only if that can't happen.

static bool OptimizableConstantAccess(MDefinition* base,
                                      const wasm::MemoryAccessDesc& access) {
  MOZ_ASSERT(base->isConstant());
  MOZ_ASSERT(base->type() == MIRType::Int32);

  if (!(base->toConstant()->isInt32(0) || access.offset() == 0)) {
    return false;
  }
  if (access.type() == Scalar::Int64) {
    // For int64 accesses on 32-bit systems we will need to add another offset
    // of 4 to access the high part of the value; make sure this does not
    // overflow the value.
    int32_t v;
    if (base->toConstant()->isInt32(0)) {
      v = access.offset();
    } else {
      v = base->toConstant()->toInt32();
    }
    return v <= int32_t(INT32_MAX - INT64HIGH_OFFSET);
  }
  return true;
}

void LIRGenerator::visitWasmHeapBase(MWasmHeapBase* ins) {
  auto* lir = new (alloc()) LWasmHeapBase(useRegisterAtStart(ins->tlsPtr()));
  define(lir, ins);
}

void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    auto* lir = new (alloc())
        LWasmAtomicLoadI64(useRegister(memoryBase), useRegister(base),
                           tempFixed(ecx), tempFixed(ebx));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  LAllocation baseAlloc;
  if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
    baseAlloc = ins->type() == MIRType::Int64 ? useRegister(base)
                                              : useRegisterAtStart(base);
  }

  if (ins->type() != MIRType::Int64) {
    auto* lir =
        new (alloc()) LWasmLoad(baseAlloc, useRegisterAtStart(memoryBase));
    define(lir, ins);
    return;
  }

  // "AtStart" register usage does not work for the 64-bit case because we
  // clobber two registers for the result and may need two registers for a
  // scaled address; we can't guarantee non-interference.

  auto* lir = new (alloc()) LWasmLoadI64(baseAlloc, useRegister(memoryBase));

  Scalar::Type accessType = ins->access().type();
  if (accessType == Scalar::Int8 || accessType == Scalar::Int16 ||
      accessType == Scalar::Int32) {
    // We use cdq to sign-extend the result and cdq demands these registers.
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  defineInt64(lir, ins);
}

void LIRGenerator::visitWasmStore(MWasmStore* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    auto* lir = new (alloc())
        LWasmAtomicStoreI64(useRegister(memoryBase), useRegister(base),
                            useInt64Fixed(ins->value(), Register64(ecx, ebx)),
                            tempFixed(edx), tempFixed(eax));
    add(lir, ins);
    return;
  }

  LAllocation baseAlloc;
  if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
    baseAlloc = useRegisterAtStart(base);
  }

  LAllocation valueAlloc;
  switch (ins->access().type()) {
    case Scalar::Int8:
    case Scalar::Uint8:
      // See comment for LIRGeneratorX86::useByteOpRegister.
      valueAlloc = useFixed(ins->value(), eax);
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
    case Scalar::Float64:
      // For now, don't allow constant values. The immediate operand affects
      // instruction layout which affects patching.
      valueAlloc = useRegisterAtStart(ins->value());
      break;
    case Scalar::Simd128:
#ifdef ENABLE_WASM_SIMD
      valueAlloc = useRegisterAtStart(ins->value());
      break;
#else
      MOZ_CRASH("unexpected array type");
#endif
    case Scalar::Int64: {
      LInt64Allocation valueAlloc = useInt64RegisterAtStart(ins->value());
      auto* lir = new (alloc())
          LWasmStoreI64(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
      add(lir, ins);
      return;
    }
    case Scalar::Uint8Clamped:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::MaxTypedArrayViewType:
      MOZ_CRASH("unexpected array type");
  }

  auto* lir = new (alloc())
      LWasmStore(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
  add(lir, ins);
}

void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmCompareExchangeI64(
        useRegisterAtStart(memoryBase), useRegisterAtStart(base),
        useInt64FixedAtStart(ins->oldValue(), Register64(edx, eax)),
        useInt64FixedAtStart(ins->newValue(), Register64(ecx, ebx)));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);

  bool byteArray = byteSize(ins->access().type()) == 1;

  // Register allocation:
  //
  // The output may not be used, but eax will be clobbered regardless
  // so pin the output to eax.
  //
  // oldval must be in a register.
  //
  // newval must be in a register. If the source is a byte array
  // then newval must be a register that has a byte size: this must
  // be ebx, ecx, or edx (eax is taken).
  //
  // Bug #1077036 describes some optimization opportunities.

  const LAllocation oldval = useRegister(ins->oldValue());
  const LAllocation newval =
      byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());

  LWasmCompareExchangeHeap* lir = new (alloc()) LWasmCompareExchangeHeap(
      useRegister(base), oldval, newval, useRegister(memoryBase));

  lir->setAddrTemp(temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}

void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64) {
    MDefinition* base = ins->base();
    auto* lir = new (alloc()) LWasmAtomicExchangeI64(
        useRegister(memoryBase), useRegister(base),
        useInt64Fixed(ins->value(), Register64(ecx, ebx)), ins->access());
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  const LAllocation base = useRegister(ins->base());
  const LAllocation value = useRegister(ins->value());

  LWasmAtomicExchangeHeap* lir = new (alloc())
      LWasmAtomicExchangeHeap(base, value, useRegister(memoryBase));

  lir->setAddrTemp(temp());
  if (byteSize(ins->access().type()) == 1) {
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
  } else {
    define(lir, ins);
  }
}

void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc())
        LWasmAtomicBinopI64(useRegister(memoryBase), useRegister(base),
                            useInt64Fixed(ins->value(), Register64(ecx, ebx)),
                            ins->access(), ins->operation());
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);

  bool byteArray = byteSize(ins->access().type()) == 1;

  // Case 1: the result of the operation is not used.
  //
  // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
  // LOCK OR, or LOCK XOR. These can all take an immediate.

  if (!ins->hasUses()) {
    LAllocation value;
    if (byteArray && !ins->value()->isConstant()) {
      value = useFixed(ins->value(), ebx);
    } else {
      value = useRegisterOrConstant(ins->value());
    }
    LWasmAtomicBinopHeapForEffect* lir =
        new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base), value,
                                                    LDefinition::BogusTemp(),
                                                    useRegister(memoryBase));
    lir->setAddrTemp(temp());
    add(lir, ins);
    return;
  }

  // Case 2: the result of the operation is used.
  //
  // For ADD and SUB we'll use XADD:
  //
  //    movl       value, output
  //    lock xaddl output, mem
  //
  // For the 8-bit variants XADD needs a byte register for the output
  // only; we can still set up with movl, just pin the output to eax
  // (or ebx / ecx / edx).
  //
  // For AND/OR/XOR we need to use a CMPXCHG loop:
  //
  //    movl         *mem, eax
  // L: mov          eax, temp
  //    andl         value, temp
  //    lock cmpxchg temp, mem  ; reads eax also
  //    jnz          L
  //    ; result in eax
  //
  // Note the placement of L: cmpxchg will update eax with *mem if
  // *mem does not have the expected value, so reloading it at the
  // top of the loop would be redundant.
  //
  // We want to fix eax as the output. We also need a temp for
  // the intermediate value.
  //
  // For the 8-bit variants the temp must have a byte register.
  //
  // There are optimization opportunities:
  //  - better 8-bit register allocation and instruction selection, Bug
  //    #1077036.

  bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
                 ins->operation() == AtomicFetchSubOp);
  LDefinition tempDef = LDefinition::BogusTemp();
  LAllocation value;

  if (byteArray) {
    value = useFixed(ins->value(), ebx);
    if (bitOp) {
      tempDef = tempFixed(ecx);
    }
  } else if (bitOp || ins->value()->isConstant()) {
    value = useRegisterOrConstant(ins->value());
    if (bitOp) {
      tempDef = temp();
    }
  } else {
    value = useRegisterAtStart(ins->value());
  }

  LWasmAtomicBinopHeap* lir = new (alloc())
      LWasmAtomicBinopHeap(useRegister(base), value, tempDef,
                           LDefinition::BogusTemp(), useRegister(memoryBase));

  lir->setAddrTemp(temp());
  if (byteArray || bitOp) {
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
  } else if (ins->value()->isConstant()) {
    define(lir, ins);
  } else {
    defineReuseInput(lir, ins, LWasmAtomicBinopHeap::valueOp);
  }
}

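// There is no usable 64-bit divide instruction on x86-32: int64 div/mod
// are lowered to calls into wasm builtins, with the operands pinned to
// the fixed registers the call sequence expects.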
void LIRGeneratorX86::lowerDivI64(MDiv* div) {
  MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
}

void LIRGeneratorX86::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
  MOZ_ASSERT(div->lhs()->type() == div->rhs()->type());
  MOZ_ASSERT(IsNumberType(div->type()));

  MOZ_ASSERT(div->type() == MIRType::Int64);

  if (div->isUnsigned()) {
    LUDivOrModI64* lir = new (alloc())
        LUDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
                      useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
                      useFixedAtStart(div->tls(), WasmTlsReg));
    defineReturn(lir, div);
    return;
  }

  LDivOrModI64* lir = new (alloc())
      LDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
                   useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
                   useFixedAtStart(div->tls(), WasmTlsReg));
  defineReturn(lir, div);
}

void LIRGeneratorX86::lowerModI64(MMod* mod) {
  MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
}

void LIRGeneratorX86::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
  MDefinition* lhs = mod->lhs();
  MDefinition* rhs = mod->rhs();
  MOZ_ASSERT(lhs->type() == rhs->type());
  MOZ_ASSERT(IsNumberType(mod->type()));

  MOZ_ASSERT(mod->type() == MIRType::Int64);

  if (mod->isUnsigned()) {
    LUDivOrModI64* lir = new (alloc())
        LUDivOrModI64(useInt64FixedAtStart(lhs, Register64(eax, ebx)),
                      useInt64FixedAtStart(rhs, Register64(ecx, edx)),
                      useFixedAtStart(mod->tls(), WasmTlsReg));
    defineReturn(lir, mod);
    return;
  }

  LDivOrModI64* lir = new (alloc())
      LDivOrModI64(useInt64FixedAtStart(lhs, Register64(eax, ebx)),
                   useInt64FixedAtStart(rhs, Register64(ecx, edx)),
                   useFixedAtStart(mod->tls(), WasmTlsReg));
  defineReturn(lir, mod);
}

void LIRGeneratorX86::lowerUDivI64(MDiv* div) {
  MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
}

void LIRGeneratorX86::lowerUModI64(MMod* mod) {
  MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
}

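// x86 (i)div takes its dividend in edx:eax and clobbers both halves, so
// the BigInt div/mod lowerings below reserve eax as a fixed temp and pin
// the output to edx.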
void LIRGeneratorX86::lowerBigIntDiv(MBigIntDiv* ins) {
  auto* lir = new (alloc()) LBigIntDiv(
      useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(eax), temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(edx)));
  assignSafepoint(lir, ins);
}

void LIRGeneratorX86::lowerBigIntMod(MBigIntMod* ins) {
  auto* lir = new (alloc()) LBigIntMod(
      useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(eax), temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(edx)));
  assignSafepoint(lir, ins);
}

void LIRGenerator::visitSubstr(MSubstr* ins) {
  // Due to the lack of registers on x86, we reuse the string register as a
  // temporary. As a result we only need two temporary registers and pass a
  // bogus temporary as the fifth argument.
  LSubstr* lir = new (alloc())
      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
              useRegister(ins->length()), temp(), LDefinition::BogusTemp(),
              tempByteOpRegister());
  define(lir, ins);
  assignSafepoint(lir, ins);
}

void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  LDefinition temp = tempDouble();
  defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd), temp), ins);
}

void LIRGeneratorX86::lowerWasmBuiltinTruncateToInt64(
    MWasmBuiltinTruncateToInt64* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}

void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Int64);
  MOZ_ASSERT(IsFloatingPointType(ins->type()));

  LDefinition maybeTemp =
      (ins->isUnsigned() &&
       ((ins->type() == MIRType::Double && AssemblerX86Shared::HasSSE3()) ||
        ins->type() == MIRType::Float32))
          ? temp()
          : LDefinition::BogusTemp();

  define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp),
         ins);
}

void LIRGeneratorX86::lowerBuiltinInt64ToFloatingPoint(
    MBuiltinInt64ToFloatingPoint* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}

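// Unsigned extension just clears the high word, so any register pair works;
// signed extension uses cdq, which sign-extends eax into edx and therefore
// forces the edx:eax pair.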
void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
  if (ins->isUnsigned()) {
    defineInt64(new (alloc())
                    LExtendInt32ToInt64(useRegisterAtStart(ins->input())),
                ins);
  } else {
    LExtendInt32ToInt64* lir =
        new (alloc()) LExtendInt32ToInt64(useFixedAtStart(ins->input(), eax));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
  }
}

void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
  // Here we'll end up using cdq which requires input and output in (edx,eax).
  LSignExtendInt64* lir = new (alloc()) LSignExtendInt64(
      useInt64FixedAtStart(ins->input(), Register64(edx, eax)));
  defineInt64Fixed(lir, ins,
                   LInt64Allocation(LAllocation(AnyRegister(edx)),
                                    LAllocation(AnyRegister(eax))));
}

// On x86 we specialize the only cases where compare is {U,}Int32 and select
// is {U,}Int32.
bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
    MCompare::CompareType compTy, MIRType insTy) {
  return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
                                     compTy == MCompare::Compare_UInt32);
}

void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
                                                   MDefinition* lhs,
                                                   MDefinition* rhs,
                                                   MCompare::CompareType compTy,
                                                   JSOp jsop) {
  MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
  auto* lir = new (alloc()) LWasmCompareAndSelect(
      useRegister(lhs), useAny(rhs), compTy, jsop,
      useRegisterAtStart(ins->trueExpr()), useAny(ins->falseExpr()));
  defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
}