1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/MacroAssembler-inl.h"
8
9 #include "mozilla/FloatingPoint.h"
10 #include "mozilla/MathAlgorithms.h"
11 #include "mozilla/XorShift128PlusRNG.h"
12
13 #include <algorithm>
14
15 #include "jsfriendapi.h"
16
17 #include "gc/GCProbes.h"
18 #include "jit/ABIFunctions.h"
19 #include "jit/AtomicOp.h"
20 #include "jit/AtomicOperations.h"
21 #include "jit/Bailouts.h"
22 #include "jit/BaselineFrame.h"
23 #include "jit/BaselineIC.h"
24 #include "jit/BaselineJIT.h"
25 #include "jit/JitFrames.h"
26 #include "jit/JitOptions.h"
27 #include "jit/JitRuntime.h"
28 #include "jit/Lowering.h"
29 #include "jit/MIR.h"
30 #include "jit/MoveEmitter.h"
31 #include "jit/SharedICHelpers.h"
32 #include "jit/SharedICRegisters.h"
33 #include "jit/Simulator.h"
34 #include "js/Conversions.h"
35 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
36 #include "js/ScalarType.h" // js::Scalar::Type
37 #include "vm/ArgumentsObject.h"
38 #include "vm/ArrayBufferViewObject.h"
39 #include "vm/FunctionFlags.h" // js::FunctionFlags
40 #include "vm/JSContext.h"
41 #include "vm/TraceLogging.h"
42 #include "vm/TypedArrayObject.h"
43 #include "wasm/WasmTypes.h"
44 #include "wasm/WasmValidate.h"
45
46 #include "gc/Nursery-inl.h"
47 #include "jit/ABIFunctionList-inl.h"
48 #include "jit/shared/Lowering-shared-inl.h"
49 #include "jit/TemplateObject-inl.h"
50 #include "vm/BytecodeUtil-inl.h"
51 #include "vm/Interpreter-inl.h"
52 #include "vm/JSObject-inl.h"
53
54 using namespace js;
55 using namespace js::jit;
56
57 using JS::GenericNaN;
58 using JS::ToInt32;
59
60 using mozilla::CheckedInt;
61
preBarrierTrampoline(MIRType type)62 TrampolinePtr MacroAssembler::preBarrierTrampoline(MIRType type) {
63 const JitRuntime* rt = GetJitContext()->runtime->jitRuntime();
64 return rt->preBarrier(type);
65 }
66
67 template <typename S, typename T>
StoreToTypedFloatArray(MacroAssembler & masm,int arrayType,const S & value,const T & dest)68 static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
69 const S& value, const T& dest) {
70 switch (arrayType) {
71 case Scalar::Float32:
72 masm.storeFloat32(value, dest);
73 break;
74 case Scalar::Float64:
75 masm.storeDouble(value, dest);
76 break;
77 default:
78 MOZ_CRASH("Invalid typed array type");
79 }
80 }
81
// Store |value| into the Float32/Float64 typed-array element addressed by a
// base+index pair. Delegates to the static helper above.
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const BaseIndex& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}
// Store |value| into the Float32/Float64 typed-array element at a fixed
// address. Delegates to the static helper above.
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const Address& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}
92
// Emit a store of the 64-bit |value| into the BigInt64/BigUint64 typed-array
// element at |dest|. Both BigInt element types use the same raw 64-bit store.
template <typename S, typename T>
static void StoreToTypedBigIntArray(MacroAssembler& masm,
                                    Scalar::Type arrayType, const S& value,
                                    const T& dest) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));
  masm.store64(value, dest);
}
100
// Store the 64-bit |value| into a BigInt64/BigUint64 typed-array element
// addressed by a base+index pair.
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const BaseIndex& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
// Store the 64-bit |value| into a BigInt64/BigUint64 typed-array element at a
// fixed address.
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const Address& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
111
boxUint32(Register source,ValueOperand dest,Uint32Mode mode,Label * fail)112 void MacroAssembler::boxUint32(Register source, ValueOperand dest,
113 Uint32Mode mode, Label* fail) {
114 switch (mode) {
115 // Fail if the value does not fit in an int32.
116 case Uint32Mode::FailOnDouble: {
117 branchTest32(Assembler::Signed, source, source, fail);
118 tagValue(JSVAL_TYPE_INT32, source, dest);
119 break;
120 }
121 case Uint32Mode::ForceDouble: {
122 // Always convert the value to double.
123 ScratchDoubleScope fpscratch(*this);
124 convertUInt32ToDouble(source, fpscratch);
125 boxDouble(fpscratch, dest, fpscratch);
126 break;
127 }
128 }
129 }
130
// Load a typed-array element from |src| into |dest|, sign-/zero-extending or
// converting as required by |arrayType|.
//
// |temp| is only used for Uint32 loads into a float register (to hold the raw
// bits before conversion). |fail| is only used for Uint32 loads into a GPR.
// BigInt element types are not handled here; see loadFromTypedBigIntArray.
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        AnyRegister dest, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
      load8SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
      load8ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int16:
      load16SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint16:
      load16ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int32:
      load32(src, dest.gpr());
      break;
    case Scalar::Uint32:
      if (dest.isFloat()) {
        // Load the raw bits into |temp|, then convert to double.
        load32(src, temp);
        convertUInt32ToDouble(temp, dest.fpu());
      } else {
        load32(src, dest.gpr());

        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadUnboxedScalar to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
      }
      break;
    case Scalar::Float32:
      loadFloat32(src, dest.fpu());
      // Canonicalize any NaN result.
      canonicalizeFloat(dest.fpu());
      break;
    case Scalar::Float64:
      loadDouble(src, dest.fpu());
      // Canonicalize any NaN result.
      canonicalizeDouble(dest.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
179
// Explicit instantiations for the two addressing modes used by callers.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);
188
// Load a typed-array element from |src| and box it into the Value register
// |dest|. Uint32 values may not fit in an int32; |uint32Mode| controls
// whether that case becomes a double or a bailout to |fail|, and |temp|
// holds the raw value so |dest| isn't clobbered before a possible branch.
// BigInt element types are not handled here.
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        const ValueOperand& dest,
                                        Uint32Mode uint32Mode, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      // These always fit in an int32: load into the scratch register and tag.
      // The helper needs no temp or fail label for these types.
      loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
                         InvalidReg, nullptr);
      tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
      break;
    case Scalar::Uint32:
      // Don't clobber dest when we could fail, instead use temp.
      load32(src, temp);
      boxUint32(temp, dest, uint32Mode, fail);
      break;
    case Scalar::Float32: {
      ScratchDoubleScope dscratch(*this);
      FloatRegister fscratch = dscratch.asSingle();
      // Load as float32, widen to double, then box.
      loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
                         dest.scratchReg(), nullptr);
      convertFloat32ToDouble(fscratch, dscratch);
      boxDouble(dscratch, dest, dscratch);
      break;
    }
    case Scalar::Float64: {
      ScratchDoubleScope fpscratch(*this);
      loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
                         dest.scratchReg(), nullptr);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
232
// Explicit instantiations for the two addressing modes used by callers.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 const ValueOperand& dest,
                                                 Uint32Mode uint32Mode,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 const ValueOperand& dest,
                                                 Uint32Mode uint32Mode,
                                                 Register temp, Label* fail);
243
// Load a BigInt64/BigUint64 typed-array element from |src| into the 64-bit
// register pair |temp|, then initialize the BigInt referenced by |bigInt|
// from that value (see initializeBigInt64 for the digit handling).
template <typename T>
void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                              const T& src, Register bigInt,
                                              Register64 temp) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));

  load64(src, temp);
  initializeBigInt64(arrayType, bigInt, temp);
}
253
// Explicit instantiations for the two addressing modes used by callers.
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const Address& src,
                                                       Register bigInt,
                                                       Register64 temp);
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const BaseIndex& src,
                                                       Register bigInt,
                                                       Register64 temp);
262
// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
//
// Emits a jump to |fail| (unconditional for compile-time conditions,
// conditional for the zeal check) whenever allocation must be handled
// out-of-line.
void MacroAssembler::checkAllocatorState(Label* fail) {
  // Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
  jump(fail);
#endif

#ifdef JS_GC_ZEAL
  // Don't execute the inline path if gc zeal or tracing are active.
  const uint32_t* ptrZealModeBits =
      GetJitContext()->runtime->addressOfGCZealModeBits();
  branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
           fail);
#endif

  // Don't execute the inline path if the realm has an object metadata callback,
  // as the metadata to use for the object may vary between executions of the
  // op.
  if (GetJitContext()->realm()->hasAllocationMetadataBuilder()) {
    jump(fail);
  }
}
286
shouldNurseryAllocate(gc::AllocKind allocKind,gc::InitialHeap initialHeap)287 bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
288 gc::InitialHeap initialHeap) {
289 // Note that Ion elides barriers on writes to objects known to be in the
290 // nursery, so any allocation that can be made into the nursery must be made
291 // into the nursery, even if the nursery is disabled. At runtime these will
292 // take the out-of-line path, which is required to insert a barrier for the
293 // initializing writes.
294 return IsNurseryAllocable(allocKind) && initialHeap != gc::TenuredHeap;
295 }
296
// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer.
//
// |result| receives the new object and |temp| is clobbered. |nDynamicSlots|
// is a compile-time constant. |allocSite| may carry a baseline allocation
// site register used for the dynamic pretenuring check below.
void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           size_t nDynamicSlots, Label* fail,
                                           const AllocSiteInput& allocSite) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // We still need to allocate in the nursery, per the comment in
  // shouldNurseryAllocate; however, we need to insert into the
  // mallocedBuffers set, so bail to do the nursery allocation in the
  // interpreter.
  if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
    jump(fail);
    return;
  }

  // Check whether this allocation site needs pretenuring. This dynamic check
  // only happens for baseline code.
  if (allocSite.is<Register>()) {
    Register site = allocSite.as<Register>();
    branch32(Assembler::Equal, Address(site, gc::AllocSite::offsetOfState()),
             Imm32(int32_t(gc::AllocSite::State::LongLived)), fail);
  }

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.
  CompileZone* zone = GetJitContext()->realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  // The dynamic slots (if any) are allocated in the same nursery chunk,
  // immediately after the object itself.
  size_t totalSize = thingSize + ObjectSlots::allocSize(nDynamicSlots);
  MOZ_ASSERT(totalSize < INT32_MAX);
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfNurseryPosition(),
                      zone->addressOfNurseryCurrentEnd(), JS::TraceKind::Object,
                      totalSize, allocSite);

  if (nDynamicSlots) {
    // Initialize the ObjectSlots header that sits after the object, then
    // point the object's slots_ field at its first slot.
    store32(Imm32(nDynamicSlots),
            Address(result, thingSize + ObjectSlots::offsetOfCapacity()));
    store32(
        Imm32(0),
        Address(result, thingSize + ObjectSlots::offsetOfDictionarySlotSpan()));
    computeEffectiveAddress(
        Address(result, thingSize + ObjectSlots::offsetOfSlots()), temp);
    storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
  }
}
346
// Inlined version of FreeSpan::allocate. This does not fill in slots_.
//
// Allocates a tenured cell of |allocKind| from the zone's free lists,
// leaving a pointer to it in |result|. |temp| is clobbered. Jumps to |fail|
// when the current span is exhausted and no further span is available.
void MacroAssembler::freeListAllocate(Register result, Register temp,
                                      gc::AllocKind allocKind, Label* fail) {
  CompileZone* zone = GetJitContext()->realm()->zone();
  int thingSize = int(gc::Arena::thingSize(allocKind));

  Label fallback;
  Label success;

  // Load the first and last offsets of |zone|'s free list for |allocKind|.
  // If there is no room remaining in the span, fall back to get the next one.
  gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
  branch32(Assembler::AboveOrEqual, result, temp, &fallback);

  // Bump the offset for the next allocation.
  add32(Imm32(thingSize), result);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);  // Reload; temp was clobbered.
  store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  sub32(Imm32(thingSize), result);
  addPtr(temp, result);  // Turn the offset into a pointer.
  jump(&success);

  bind(&fallback);
  // If there are no free spans left, we bail to finish the allocation. The
  // interpreter will call the GC allocator to set up a new arena to allocate
  // from, after which we can resume allocating in the jit.
  branchTest32(Assembler::Zero, result, result, fail);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  addPtr(temp, result);  // Turn the offset into a pointer.
  Push(result);
  // Update the free list to point to the next span (which may be empty).
  load32(Address(result, 0), result);
  store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  Pop(result);

  bind(&success);

  // Keep the profiler's tenured-allocation counter in sync.
  if (GetJitContext()->runtime->geckoProfiler().enabled()) {
    uint32_t* countAddress =
        GetJitContext()->runtime->addressOfTenuredAllocCount();
    movePtr(ImmPtr(countAddress), temp);
    add32(Imm32(1), Address(temp, 0));
  }
}
394
// Call the JIT runtime's shared free stub to release the buffer in |slots|.
// The stub takes its argument in CallTempReg0, which is saved and restored
// around the call.
void MacroAssembler::callFreeStub(Register slots) {
  // This register must match the one in JitRuntime::generateFreeStub.
  const Register regSlots = CallTempReg0;

  push(regSlots);
  movePtr(slots, regSlots);
  call(GetJitContext()->runtime->jitRuntime()->freeStub());
  pop(regSlots);
}
404
405 // Inlined equivalent of gc::AllocateObject, without failure case handling.
allocateObject(Register result,Register temp,gc::AllocKind allocKind,uint32_t nDynamicSlots,gc::InitialHeap initialHeap,Label * fail,const AllocSiteInput & allocSite)406 void MacroAssembler::allocateObject(Register result, Register temp,
407 gc::AllocKind allocKind,
408 uint32_t nDynamicSlots,
409 gc::InitialHeap initialHeap, Label* fail,
410 const AllocSiteInput& allocSite) {
411 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
412
413 checkAllocatorState(fail);
414
415 if (shouldNurseryAllocate(allocKind, initialHeap)) {
416 MOZ_ASSERT(initialHeap == gc::DefaultHeap);
417 return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail,
418 allocSite);
419 }
420
421 // Fall back to calling into the VM to allocate objects in the tenured heap
422 // that have dynamic slots.
423 if (nDynamicSlots) {
424 jump(fail);
425 return;
426 }
427
428 return freeListAllocate(result, temp, allocKind, fail);
429 }
430
createGCObject(Register obj,Register temp,const TemplateObject & templateObj,gc::InitialHeap initialHeap,Label * fail,bool initContents)431 void MacroAssembler::createGCObject(Register obj, Register temp,
432 const TemplateObject& templateObj,
433 gc::InitialHeap initialHeap, Label* fail,
434 bool initContents /* = true */) {
435 gc::AllocKind allocKind = templateObj.getAllocKind();
436 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
437
438 uint32_t nDynamicSlots = 0;
439 if (templateObj.isNativeObject()) {
440 const TemplateNativeObject& ntemplate =
441 templateObj.asTemplateNativeObject();
442 nDynamicSlots = ntemplate.numDynamicSlots();
443 }
444
445 allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
446 initGCThing(obj, temp, templateObj, initContents);
447 }
448
// Allocate a plain native object with the given |shape| and initialize all of
// its fixed and dynamic slots to |undefined| and its elements pointer to the
// shared empty elements.
void MacroAssembler::createPlainGCObject(
    Register result, Register shape, Register temp, Register temp2,
    uint32_t numFixedSlots, uint32_t numDynamicSlots, gc::AllocKind allocKind,
    gc::InitialHeap initialHeap, Label* fail, const AllocSiteInput& allocSite) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
  MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");

  // Allocate object.
  allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
                 allocSite);

  // Initialize shape field.
  storePtr(shape, Address(result, JSObject::offsetOfShape()));

  // If the object has dynamic slots, allocateObject will initialize
  // the slots field. If not, we must initialize it now.
  if (numDynamicSlots == 0) {
    storePtr(ImmPtr(emptyObjectSlots),
             Address(result, NativeObject::offsetOfSlots()));
  }

  // Initialize elements field.
  storePtr(ImmPtr(emptyObjectElements),
           Address(result, NativeObject::offsetOfElements()));

  // Initialize fixed slots.
  fillSlotsWithUndefined(Address(result, NativeObject::getFixedSlotOffset(0)),
                         temp, 0, numFixedSlots);

  // Initialize dynamic slots.
  if (numDynamicSlots > 0) {
    loadPtr(Address(result, NativeObject::offsetOfSlots()), temp2);
    fillSlotsWithUndefined(Address(temp2, 0), temp, 0, numDynamicSlots);
  }
}
484
createArrayWithFixedElements(Register result,Register shape,Register temp,uint32_t arrayLength,uint32_t arrayCapacity,gc::AllocKind allocKind,gc::InitialHeap initialHeap,Label * fail,const AllocSiteInput & allocSite)485 void MacroAssembler::createArrayWithFixedElements(
486 Register result, Register shape, Register temp, uint32_t arrayLength,
487 uint32_t arrayCapacity, gc::AllocKind allocKind,
488 gc::InitialHeap initialHeap, Label* fail, const AllocSiteInput& allocSite) {
489 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
490 MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
491 MOZ_ASSERT(result != temp);
492
493 // This only supports allocating arrays with fixed elements and does not
494 // support any dynamic slots or elements.
495 MOZ_ASSERT(arrayCapacity >= arrayLength);
496 MOZ_ASSERT(gc::GetGCKindSlots(allocKind) >=
497 arrayCapacity + ObjectElements::VALUES_PER_HEADER);
498
499 // Allocate object.
500 allocateObject(result, temp, allocKind, 0, initialHeap, fail, allocSite);
501
502 // Initialize shape field.
503 storePtr(shape, Address(result, JSObject::offsetOfShape()));
504
505 // There are no dynamic slots.
506 storePtr(ImmPtr(emptyObjectSlots),
507 Address(result, NativeObject::offsetOfSlots()));
508
509 // Initialize elements pointer for fixed (inline) elements.
510 computeEffectiveAddress(
511 Address(result, NativeObject::offsetOfFixedElements()), temp);
512 storePtr(temp, Address(result, NativeObject::offsetOfElements()));
513
514 // Initialize elements header.
515 store32(Imm32(0), Address(temp, ObjectElements::offsetOfFlags()));
516 store32(Imm32(0), Address(temp, ObjectElements::offsetOfInitializedLength()));
517 store32(Imm32(arrayCapacity),
518 Address(temp, ObjectElements::offsetOfCapacity()));
519 store32(Imm32(arrayLength), Address(temp, ObjectElements::offsetOfLength()));
520 }
521
// Inline version of Nursery::allocateString.
//
// Bump-allocates from the string nursery into |result| (|temp| is
// clobbered), jumping to |fail| when the nursery chunk is full.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  // Keep the zone's count of nursery-allocated strings up to date.
  CompileZone* zone = GetJitContext()->realm()->zone();
  uint64_t* allocStrsPtr = &zone->zone()->nurseryAllocatedStrings.ref();
  inc64(AbsoluteAddress(allocStrsPtr));
  size_t thingSize = gc::Arena::thingSize(allocKind);

  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfStringNurseryPosition(),
                      zone->addressOfStringNurseryCurrentEnd(),
                      JS::TraceKind::String, thingSize);
}
541
// Inline version of Nursery::allocateBigInt.
//
// Bump-allocates a BigInt cell from the BigInt nursery into |result| (|temp|
// is clobbered), jumping to |fail| when the nursery chunk is full.
void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = GetJitContext()->realm()->zone();
  size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);

  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfBigIntNurseryPosition(),
                      zone->addressOfBigIntNurseryCurrentEnd(),
                      JS::TraceKind::BigInt, thingSize);
}
558
// Bump-allocate |size| bytes (plus the nursery cell header) from the nursery
// region described by |posAddr| (current position) and |curEndAddr| (end of
// chunk). On success |result| points at the cell (just past its header);
// jumps to |fail| when the region is exhausted. Also bumps the profiler's
// nursery allocation counter and writes the cell header, either from a
// catch-all site kind or from a baseline allocation site register.
void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
                                         Label* fail, CompileZone* zone,
                                         void* posAddr, const void* curEndAddr,
                                         JS::TraceKind traceKind, uint32_t size,
                                         const AllocSiteInput& allocSite) {
  uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  // The position (allocation pointer) and the end pointer are stored
  // very close to each other -- specifically, easily within a 32 bit offset.
  // Use relative offsets between them, to avoid 64-bit immediate loads.
  //
  // I tried to optimise this further by using an extra register to avoid
  // the final subtraction and hopefully get some more instruction
  // parallelism, but it made no difference.
  movePtr(ImmPtr(posAddr), temp);
  loadPtr(Address(temp, 0), result);
  addPtr(Imm32(totalSize), result);
  CheckedInt<int32_t> endOffset =
      (CheckedInt<uintptr_t>(uintptr_t(curEndAddr)) -
       CheckedInt<uintptr_t>(uintptr_t(posAddr)))
          .toChecked<int32_t>();
  MOZ_ASSERT(endOffset.isValid(), "Position and end pointers must be nearby");
  branchPtr(Assembler::Below, Address(temp, endOffset.value()), result, fail);
  storePtr(result, Address(temp, 0));
  // |result| currently points at the new end; step back over the cell body so
  // it points just past the header.
  subPtr(Imm32(size), result);

  if (GetJitContext()->runtime->geckoProfiler().enabled()) {
    // Bump the nursery allocation counter, preferring a position-relative
    // address when the counter is close enough to the allocation pointer.
    uint32_t* countAddress = zone->addressOfNurseryAllocCount();
    CheckedInt<int32_t> counterOffset =
        (CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
         CheckedInt<uintptr_t>(uintptr_t(posAddr)))
            .toChecked<int32_t>();
    if (counterOffset.isValid()) {
      add32(Imm32(1), Address(temp, counterOffset.value()));
    } else {
      movePtr(ImmPtr(countAddress), temp);
      add32(Imm32(1), Address(temp, 0));
    }
  }

  if (allocSite.is<gc::CatchAllAllocSite>()) {
    // No allocation site supplied. This is the case when called from Warp, or
    // from places that don't support pretenuring.
    gc::CatchAllAllocSite siteKind = allocSite.as<gc::CatchAllAllocSite>();
    storePtr(ImmWord(zone->nurseryCellHeader(traceKind, siteKind)),
             Address(result, -js::Nursery::nurseryCellHeaderSize()));
  } else {
    // Update allocation site and store pointer in the nursery cell header. This
    // is only used from baseline.
    Register site = allocSite.as<Register>();
    updateAllocSite(temp, result, zone, site);
    // See NurseryCellHeader::MakeValue.
    orPtr(Imm32(int32_t(traceKind)), site);
    storePtr(site, Address(result, -js::Nursery::nurseryCellHeaderSize()));
  }
}
617
// Update the allocation site in the same way as Nursery::allocateCell.
//
// Increments |site|'s nursery allocation count and, if the site is not yet
// linked into the zone's list of nursery-allocated sites, pushes it onto the
// head of that list.
// NOTE(review): |result| is not referenced in this function — confirm whether
// the parameter is kept only for signature symmetry with callers.
void MacroAssembler::updateAllocSite(Register temp, Register result,
                                     CompileZone* zone, Register site) {
  Label done;

  // Count this allocation against the site.
  add32(Imm32(1), Address(site, gc::AllocSite::offsetOfNurseryAllocCount()));

  // A non-null next pointer means the site is already on the list.
  branchPtr(Assembler::NotEqual,
            Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()),
            ImmPtr(nullptr), &done);

  // Link the site at the head of the zone's nursery-allocated-sites list.
  loadPtr(AbsoluteAddress(zone->addressOfNurseryAllocatedSites()), temp);
  storePtr(temp, Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()));
  storePtr(site, AbsoluteAddress(zone->addressOfNurseryAllocatedSites()));

  bind(&done);
}
635
636 // Inlined equivalent of gc::AllocateString, jumping to fail if nursery
637 // allocation requested but unsuccessful.
allocateString(Register result,Register temp,gc::AllocKind allocKind,gc::InitialHeap initialHeap,Label * fail)638 void MacroAssembler::allocateString(Register result, Register temp,
639 gc::AllocKind allocKind,
640 gc::InitialHeap initialHeap, Label* fail) {
641 MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
642 allocKind == gc::AllocKind::FAT_INLINE_STRING);
643
644 checkAllocatorState(fail);
645
646 if (shouldNurseryAllocate(allocKind, initialHeap)) {
647 MOZ_ASSERT(initialHeap == gc::DefaultHeap);
648 return nurseryAllocateString(result, temp, allocKind, fail);
649 }
650
651 freeListAllocate(result, temp, allocKind, fail);
652 }
653
newGCString(Register result,Register temp,Label * fail,bool attemptNursery)654 void MacroAssembler::newGCString(Register result, Register temp, Label* fail,
655 bool attemptNursery) {
656 allocateString(result, temp, js::gc::AllocKind::STRING,
657 attemptNursery ? gc::DefaultHeap : gc::TenuredHeap, fail);
658 }
659
newGCFatInlineString(Register result,Register temp,Label * fail,bool attemptNursery)660 void MacroAssembler::newGCFatInlineString(Register result, Register temp,
661 Label* fail, bool attemptNursery) {
662 allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
663 attemptNursery ? gc::DefaultHeap : gc::TenuredHeap, fail);
664 }
665
newGCBigInt(Register result,Register temp,Label * fail,bool attemptNursery)666 void MacroAssembler::newGCBigInt(Register result, Register temp, Label* fail,
667 bool attemptNursery) {
668 checkAllocatorState(fail);
669
670 gc::InitialHeap initialHeap =
671 attemptNursery ? gc::DefaultHeap : gc::TenuredHeap;
672 if (shouldNurseryAllocate(gc::AllocKind::BIGINT, initialHeap)) {
673 MOZ_ASSERT(initialHeap == gc::DefaultHeap);
674 return nurseryAllocateBigInt(result, temp, fail);
675 }
676
677 freeListAllocate(result, temp, gc::AllocKind::BIGINT, fail);
678 }
679
copySlotsFromTemplate(Register obj,const TemplateNativeObject & templateObj,uint32_t start,uint32_t end)680 void MacroAssembler::copySlotsFromTemplate(
681 Register obj, const TemplateNativeObject& templateObj, uint32_t start,
682 uint32_t end) {
683 uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
684 for (unsigned i = start; i < nfixed; i++) {
685 // Template objects are not exposed to script and therefore immutable.
686 // However, regexp template objects are sometimes used directly (when
687 // the cloning is not observable), and therefore we can end up with a
688 // non-zero lastIndex. Detect this case here and just substitute 0, to
689 // avoid racing with the main thread updating this slot.
690 Value v;
691 if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
692 v = Int32Value(0);
693 } else {
694 v = templateObj.getSlot(i);
695 }
696 storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
697 }
698 }
699
// Store |v| into the |end - start| consecutive Value slots beginning at
// |base|, using |temp| as scratch. Only |undefined| and the
// uninitialized-lexical magic value are supported.
void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
                                                uint32_t start, uint32_t end,
                                                const Value& v) {
  MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));

  if (start >= end) {
    return;
  }

#ifdef JS_NUNBOX32
  // We only have a single spare register, so do the initialization as two
  // strided writes of the tag and body.
  Address addr = base;
  move32(Imm32(v.toNunboxPayload()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue)) {
    store32(temp, ToPayload(addr));
  }

  addr = base;
  move32(Imm32(v.toNunboxTag()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue)) {
    store32(temp, ToType(addr));
  }
#else
  // On 64-bit we can materialize the boxed Value once and store it whole.
  moveValue(v, ValueOperand(temp));
  for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtrValue)) {
    storePtr(temp, base);
  }
#endif
}
730
// Fill the |end - start| Value slots beginning at |base| with |undefined|.
void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
                                            uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}
735
// Fill the |end - start| Value slots beginning at |base| with the
// uninitialized-lexical magic value.
void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
                                                uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end,
                             MagicValue(JS_UNINITIALIZED_LEXICAL));
}
741
FindStartOfUninitializedAndUndefinedSlots(const TemplateNativeObject & templateObj,uint32_t nslots,uint32_t * startOfUninitialized,uint32_t * startOfUndefined)742 static void FindStartOfUninitializedAndUndefinedSlots(
743 const TemplateNativeObject& templateObj, uint32_t nslots,
744 uint32_t* startOfUninitialized, uint32_t* startOfUndefined) {
745 MOZ_ASSERT(nslots == templateObj.slotSpan());
746 MOZ_ASSERT(nslots > 0);
747
748 uint32_t first = nslots;
749 for (; first != 0; --first) {
750 if (templateObj.getSlot(first - 1) != UndefinedValue()) {
751 break;
752 }
753 }
754 *startOfUndefined = first;
755
756 if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
757 for (; first != 0; --first) {
758 if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
759 break;
760 }
761 }
762 *startOfUninitialized = first;
763 } else {
764 *startOfUninitialized = *startOfUndefined;
765 }
766 }
767
// Store the private pointer |ptr| into the Value-sized slot at |address|.
template <typename Src>
inline void MacroAssembler::storeObjPrivate(Src ptr, const Address& address) {
  // The private pointer is stored as a PrivateValue in a JS::Value, so on 32
  // bit systems we also need to zero the top word.
#ifdef JS_PUNBOX64
  storePtr(ptr, address);
#else
  storePtr(ptr, LowWord(address));
  store32(Imm32(0), HighWord(address));
#endif
}
779
// Initialize the data pointer and element storage of a freshly allocated
// TypedArray matching |templateObj|. Small fixed-length arrays keep their
// data inline after the object's slots and are zero-filled here; otherwise a
// zeroed buffer is allocated on the heap via an ABI call, and we branch to
// |fail| when that allocation left the data slot null.
void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
                                         Register lengthReg,
                                         LiveRegisterSet liveRegs, Label* fail,
                                         TypedArrayObject* templateObj,
                                         TypedArrayLength lengthKind) {
  MOZ_ASSERT(templateObj->hasPrivate());
  MOZ_ASSERT(!templateObj->hasBuffer());

  // Byte offset of the data slot, and of the inline data following it.
  constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
  constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);

  static_assert(
      TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
      "fixed inline element data assumed to begin after the data slot");

  static_assert(
      TypedArrayObject::INLINE_BUFFER_LIMIT ==
          JSObject::MAX_BYTE_SIZE - dataOffset,
      "typed array inline buffer is limited by the maximum object byte size");

  // Initialise data elements to zero.
  size_t length = templateObj->length();
  MOZ_ASSERT(length <= INT32_MAX,
             "Template objects are only created for int32 lengths");
  size_t nbytes = length * templateObj->bytesPerElement();

  if (lengthKind == TypedArrayLength::Fixed &&
      nbytes <= TypedArrayObject::INLINE_BUFFER_LIMIT) {
    MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());

    // Store data elements inside the remaining JSObject slots.
    computeEffectiveAddress(Address(obj, dataOffset), temp);
    storeObjPrivate(temp, Address(obj, dataSlotOffset));

    // Write enough zero pointers into fixed data to zero every
    // element. (This zeroes past the end of a byte count that's
    // not a multiple of pointer size. That's okay, because fixed
    // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
    // and we won't inline unless the desired memory fits in that
    // space.)
    static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");

    size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
    for (size_t i = 0; i < numZeroPointers; i++) {
      storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
    }
    MOZ_ASSERT(nbytes > 0, "Zero-length TypedArrays need ZeroLengthArrayData");
  } else {
    if (lengthKind == TypedArrayLength::Fixed) {
      // The runtime helper below reads the element count from |lengthReg|.
      move32(Imm32(length), lengthReg);
    }

    // Allocate a buffer on the heap to store the data elements.
    liveRegs.addUnchecked(temp);
    liveRegs.addUnchecked(obj);
    liveRegs.addUnchecked(lengthReg);
    PushRegsInMask(liveRegs);
    using Fn = void (*)(JSContext * cx, TypedArrayObject * obj, int32_t count);
    setupUnalignedABICall(temp);
    loadJSContext(temp);
    passABIArg(temp);
    passABIArg(obj);
    passABIArg(lengthReg);
    callWithABI<Fn, AllocateAndInitTypedArrayBuffer>();
    PopRegsInMask(liveRegs);

    // Fail when data elements is set to NULL.
    branchPtr(Assembler::Equal, Address(obj, dataSlotOffset), ImmWord(0), fail);
  }
}
850
// Initialize the fixed and (already-allocated) dynamic slots of a freshly
// allocated native object from |templateObj|. When |initContents| is false
// the undefined/uninitialized fixed-slot tail is skipped; reserved-slot
// values and dynamic slots are still written.
void MacroAssembler::initGCSlots(Register obj, Register temp,
                                 const TemplateNativeObject& templateObj,
                                 bool initContents) {
  // Slots of non-array objects are required to be initialized.
  // Use the values currently in the template object.
  uint32_t nslots = templateObj.slotSpan();
  if (nslots == 0) {
    return;
  }

  uint32_t nfixed = templateObj.numUsedFixedSlots();
  uint32_t ndynamic = templateObj.numDynamicSlots();

  // Attempt to group slot writes such that we minimize the amount of
  // duplicated data we need to embed in code and load into registers. In
  // general, most template object slots will be undefined except for any
  // reserved slots. Since reserved slots come first, we split the object
  // logically into independent non-UndefinedValue writes to the head and
  // duplicated writes of UndefinedValue to the tail. For the majority of
  // objects, the "tail" will be the entire slot range.
  //
  // The template object may be a CallObject, in which case we need to
  // account for uninitialized lexical slots as well as undefined
  // slots. Unitialized lexical slots appears in CallObjects if the function
  // has parameter expressions, in which case closed over parameters have
  // TDZ. Uninitialized slots come before undefined slots in CallObjects.
  uint32_t startOfUninitialized = nslots;
  uint32_t startOfUndefined = nslots;
  FindStartOfUninitializedAndUndefinedSlots(
      templateObj, nslots, &startOfUninitialized, &startOfUndefined);
  MOZ_ASSERT(startOfUninitialized <= nfixed);  // Reserved slots must be fixed.
  MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
  MOZ_ASSERT_IF(!templateObj.isCallObject(),
                startOfUninitialized == startOfUndefined);

  // Copy over any preserved reserved slots.
  copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);

  // Fill the rest of the fixed slots with undefined and uninitialized.
  if (initContents) {
    size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
    fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
                               std::min(startOfUndefined, nfixed));

    if (startOfUndefined < nfixed) {
      offset = NativeObject::getFixedSlotOffset(startOfUndefined);
      fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
                             nfixed);
    }
  }

  if (ndynamic) {
    // We are short one register to do this elegantly. Borrow the obj
    // register briefly for our slots base address.
    push(obj);
    loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);

    // Fill uninitialized slots if necessary. Otherwise initialize all
    // slots to undefined.
    if (startOfUndefined > nfixed) {
      MOZ_ASSERT(startOfUninitialized != startOfUndefined);
      fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
                                 startOfUndefined - nfixed);
      size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
      fillSlotsWithUndefined(Address(obj, offset), temp,
                             startOfUndefined - nfixed, ndynamic);
    } else {
      fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
    }

    pop(obj);
  }
}
924
// Initialize a freshly allocated GC thing from |templateObj|: store the
// shape, then set up slots/elements according to the template's kind
// (array, arguments object, or other native object). |temp| is a scratch
// register.
void MacroAssembler::initGCThing(Register obj, Register temp,
                                 const TemplateObject& templateObj,
                                 bool initContents) {
  // Fast initialization of an empty object returned by allocateObject().

  storePtr(ImmGCPtr(templateObj.shape()),
           Address(obj, JSObject::offsetOfShape()));

  if (templateObj.isNativeObject()) {
    const TemplateNativeObject& ntemplate =
        templateObj.asTemplateNativeObject();
    MOZ_ASSERT(!ntemplate.hasDynamicElements());

    // If the object has dynamic slots, the slots member has already been
    // filled in.
    if (!ntemplate.hasDynamicSlots()) {
      storePtr(ImmPtr(emptyObjectSlots),
               Address(obj, NativeObject::offsetOfSlots()));
    }

    if (ntemplate.isArrayObject()) {
      // Arrays use their inline storage as the elements vector.
      int elementsOffset = NativeObject::offsetOfFixedElements();

      computeEffectiveAddress(Address(obj, elementsOffset), temp);
      storePtr(temp, Address(obj, NativeObject::offsetOfElements()));

      // Fill in the elements header.
      store32(
          Imm32(ntemplate.getDenseCapacity()),
          Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
      store32(Imm32(ntemplate.getDenseInitializedLength()),
              Address(obj, elementsOffset +
                               ObjectElements::offsetOfInitializedLength()));
      store32(Imm32(ntemplate.getArrayLength()),
              Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
      store32(Imm32(0),
              Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
      MOZ_ASSERT(!ntemplate.hasPrivate());
    } else if (ntemplate.isArgumentsObject()) {
      // The caller will initialize the reserved slots.
      MOZ_ASSERT(!initContents);
      MOZ_ASSERT(!ntemplate.hasPrivate());
      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));
    } else {
      // If the target type could be a TypedArray that maps shared memory
      // then this would need to store emptyObjectElementsShared in that case.
      MOZ_ASSERT(!ntemplate.isSharedMemory());

      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));

      initGCSlots(obj, temp, ntemplate, initContents);

      // TypedArray privates are handled by initTypedArraySlots instead.
      if (ntemplate.hasPrivate() && !ntemplate.isTypedArrayObject()) {
        uint32_t nfixed = ntemplate.numFixedSlots();
        Address privateSlot(obj, NativeObject::getPrivateDataOffset(nfixed));
        storeObjPrivate(ImmPtr(ntemplate.getPrivate()), privateSlot);
      }
    }
  } else {
    MOZ_CRASH("Unknown object");
  }

#ifdef JS_GC_PROBES
  // Notify GC probes about the new object (debug/instrumented builds only).
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(obj);
  Register temp2 = regs.takeAnyGeneral();

  using Fn = void (*)(JSObject * obj);
  setupUnalignedABICall(temp2);
  passABIArg(obj);
  callWithABI<Fn, TraceCreateObject>();

  PopRegsInMask(save);
#endif
}
1005
// Compare the string pointers |left| and |right| per |op|, writing a boolean
// into |result|. Handles inline only the cases decidable without inspecting
// characters: pointer-equal strings, distinct atoms, and equality ops on
// strings of different length. Every other case branches to |fail| for the
// out-of-line path.
void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
                                    Register result, Label* fail) {
  MOZ_ASSERT(left != result);
  MOZ_ASSERT(right != result);
  MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));

  Label notPointerEqual;
  // If operands point to the same instance, the strings are trivially equal.
  branchPtr(Assembler::NotEqual, left, right,
            IsEqualityOp(op) ? &notPointerEqual : fail);
  // Pointer-equal: equality holds, so ops implying equality produce true.
  move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
               op == JSOp::Ge),
         result);

  if (IsEqualityOp(op)) {
    Label done;
    jump(&done);

    bind(&notPointerEqual);

    Label leftIsNotAtom;
    Label setNotEqualResult;
    // Atoms cannot be equal to each other if they point to different strings.
    Imm32 atomBit(JSString::ATOM_BIT);
    branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()),
                 atomBit, &leftIsNotAtom);
    branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()),
                 atomBit, &setNotEqualResult);

    bind(&leftIsNotAtom);
    // Strings of different length can never be equal.
    loadStringLength(left, result);
    // Equal lengths can't be decided here; take the slow path. Unequal
    // lengths fall through to the not-equal result.
    branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
             result, fail);

    bind(&setNotEqualResult);
    move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);

    bind(&done);
  }
}
1047
// Load a pointer to |str|'s character storage (inline or out-of-line) into
// |dest|. With Spectre mitigations enabled, ropes (and, for TwoByte loads,
// Latin-1 strings) have |str| redirected to a near-null value so speculative
// reads through the result are blocked.
void MacroAssembler::loadStringChars(Register str, Register dest,
                                     CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    if (encoding == CharEncoding::Latin1) {
      // If the string is a rope, zero the |str| register. The code below
      // depends on str->flags so this should block speculative execution.
      movePtr(ImmWord(0), dest);
      test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::LINEAR_BIT), dest, str);
    } else {
      // If we're loading TwoByte chars, there's an additional risk:
      // if the string has Latin1 chars, we could read out-of-bounds. To
      // prevent this, we check both the Linear and Latin1 bits. We don't
      // have a scratch register, so we use these flags also to block
      // speculative execution, similar to the use of 0 above.
      MOZ_ASSERT(encoding == CharEncoding::TwoByte);
      static constexpr uint32_t Mask =
          JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
      static_assert(Mask < 1024,
                    "Mask should be a small, near-null value to ensure we "
                    "block speculative execution when it's used as string "
                    "pointer");
      move32(Imm32(Mask), dest);
      and32(Address(str, JSString::offsetOfFlags()), dest);
      cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
                   str);
    }
  }

  // Load the inline chars.
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);

  // If it's not an inline string, load the non-inline chars. Use a
  // conditional move to prevent speculative execution.
  test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                Imm32(JSString::INLINE_CHARS_BIT),
                Address(str, JSString::offsetOfNonInlineChars()), dest);
}
1089
// Load |str|'s out-of-line character pointer into |dest|. The caller must
// know the string is linear, non-inline, and of the given |encoding|; with
// Spectre mitigations, strings violating that are redirected to a near-null
// value first.
void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
                                              CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is a rope, has inline chars, or has a different
    // character encoding, set str to a near-null value to prevent
    // speculative execution below (when reading str->nonInlineChars).

    static constexpr uint32_t Mask = JSString::LINEAR_BIT |
                                     JSString::INLINE_CHARS_BIT |
                                     JSString::LATIN1_CHARS_BIT;
    static_assert(Mask < 1024,
                  "Mask should be a small, near-null value to ensure we "
                  "block speculative execution when it's used as string "
                  "pointer");

    uint32_t expectedBits = JSString::LINEAR_BIT;
    if (encoding == CharEncoding::Latin1) {
      expectedBits |= JSString::LATIN1_CHARS_BIT;
    }

    move32(Imm32(Mask), dest);
    and32(Address(str, JSString::offsetOfFlags()), dest);

    cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
  }

  loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}
1120
// Store |chars| as |str|'s out-of-line character pointer.
void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
  MOZ_ASSERT(chars != str);
  storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
}
1125
// Compute the address of |str|'s inline character storage into |dest| for a
// subsequent store. No Spectre guard is needed because nothing is read
// through the resulting pointer here.
void MacroAssembler::loadInlineStringCharsForStore(Register str,
                                                   Register dest) {
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);
}
1131
// Load a pointer to |str|'s inline character storage into |dest|, assuming
// the caller knows the string stores its chars inline.
void MacroAssembler::loadInlineStringChars(Register str, Register dest,
                                           CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Making this Spectre-safe is a bit complicated: using
    // computeEffectiveAddress and then zeroing the output register if
    // non-inline is not sufficient: when the index is very large, it would
    // allow reading |nullptr + index|. Just fall back to loadStringChars
    // for now.
    loadStringChars(str, dest, encoding);
  } else {
    computeEffectiveAddress(
        Address(str, JSInlineString::offsetOfInlineStorage()), dest);
  }
}
1148
// Load the left child of the rope |str| into |dest|. With Spectre
// mitigations, |dest| is zeroed first so a non-rope input yields null
// instead of a speculatively usable pointer.
void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Zero the output register if the input was not a rope.
    movePtr(ImmWord(0), dest);
    test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::LINEAR_BIT),
                  Address(str, JSRope::offsetOfLeft()), dest);
  } else {
    loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
  }
}
1162
// Store |left| and |right| as the two children of the rope |str|.
void MacroAssembler::storeRopeChildren(Register left, Register right,
                                       Register str) {
  storePtr(left, Address(str, JSRope::offsetOfLeft()));
  storePtr(right, Address(str, JSRope::offsetOfRight()));
}
1168
// Load the base string of the dependent string |str| into |dest|. With
// Spectre mitigations, a non-dependent input has |str| redirected to null
// before the base field is read.
void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is not a dependent string, zero the |str| register.
    // The code below loads str->base so this should block speculative
    // execution.
    movePtr(ImmWord(0), dest);
    test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::DEPENDENT_BIT), dest, str);
  }

  loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
}
1183
// Store |base| as the base string of the dependent string |str|.
void MacroAssembler::storeDependentStringBase(Register base, Register str) {
  storePtr(base, Address(str, JSDependentString::offsetOfBase()));
}
1187
// Load the character at |index| of |str| into |output|, handling a one-level
// rope whose left child contains the index. Deeper ropes, or an index in the
// right child, branch to |fail|. |scratch| is clobbered.
void MacroAssembler::loadStringChar(Register str, Register index,
                                    Register output, Register scratch,
                                    Label* fail) {
  MOZ_ASSERT(str != output);
  MOZ_ASSERT(str != index);
  MOZ_ASSERT(index != output);
  MOZ_ASSERT(output != scratch);

  movePtr(str, output);

  // This follows JSString::getChar.
  Label notRope;
  branchIfNotRope(str, &notRope);

  loadRopeLeftChild(str, output);

  // Check if the index is contained in the leftChild.
  // Todo: Handle index in the rightChild.
  spectreBoundsCheck32(index, Address(output, JSString::offsetOfLength()),
                       scratch, fail);

  // If the left side is another rope, give up.
  branchIfRope(output, fail);

  bind(&notRope);

  Label isLatin1, done;
  // We have to check the left/right side for ropes,
  // because a TwoByte rope might have a Latin1 child.
  branchLatin1String(output, &isLatin1);
  loadStringChars(output, scratch, CharEncoding::TwoByte);
  loadChar(scratch, index, output, CharEncoding::TwoByte);
  jump(&done);

  bind(&isLatin1);
  loadStringChars(output, scratch, CharEncoding::Latin1);
  loadChar(scratch, index, output, CharEncoding::Latin1);

  bind(&done);
}
1228
// Extract the cached index value from |str|'s flags word into |dest|,
// branching to |fail| when no index value is cached.
void MacroAssembler::loadStringIndexValue(Register str, Register dest,
                                          Label* fail) {
  MOZ_ASSERT(str != dest);

  load32(Address(str, JSString::offsetOfFlags()), dest);

  // Does not have a cached index value.
  branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);

  // Extract the index.
  rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}
1241
loadChar(Register chars,Register index,Register dest,CharEncoding encoding,int32_t offset)1242 void MacroAssembler::loadChar(Register chars, Register index, Register dest,
1243 CharEncoding encoding, int32_t offset /* = 0 */) {
1244 if (encoding == CharEncoding::Latin1) {
1245 loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
1246 } else {
1247 loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
1248 }
1249 }
1250
addToCharPtr(Register chars,Register index,CharEncoding encoding)1251 void MacroAssembler::addToCharPtr(Register chars, Register index,
1252 CharEncoding encoding) {
1253 if (encoding == CharEncoding::Latin1) {
1254 static_assert(sizeof(char) == 1,
1255 "Latin-1 string index shouldn't need scaling");
1256 addPtr(index, chars);
1257 } else {
1258 computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
1259 }
1260 }
1261
// Load a pointer to |bigInt|'s digit storage (inline or heap) into |digits|.
void MacroAssembler::loadBigIntDigits(Register bigInt, Register digits) {
  MOZ_ASSERT(digits != bigInt);

  // Load the inline digits.
  computeEffectiveAddress(Address(bigInt, BigInt::offsetOfInlineDigits()),
                          digits);

  // If inline digits aren't used, load the heap digits. Use a conditional move
  // to prevent speculative execution.
  cmp32LoadPtr(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
               Imm32(int32_t(BigInt::inlineDigitsLength())),
               Address(bigInt, BigInt::offsetOfHeapDigits()), digits);
}
1275
loadBigInt64(Register bigInt,Register64 dest)1276 void MacroAssembler::loadBigInt64(Register bigInt, Register64 dest) {
1277 // This code follows the implementation of |BigInt::toUint64()|. We're also
1278 // using it for inline callers of |BigInt::toInt64()|, which works, because
1279 // all supported Jit architectures use a two's complement representation for
1280 // int64 values, which means the WrapToSigned call in toInt64() is a no-op.
1281
1282 Label done, nonZero;
1283
1284 branchIfBigIntIsNonZero(bigInt, &nonZero);
1285 {
1286 move64(Imm64(0), dest);
1287 jump(&done);
1288 }
1289 bind(&nonZero);
1290
1291 #ifdef JS_PUNBOX64
1292 Register digits = dest.reg;
1293 #else
1294 Register digits = dest.high;
1295 #endif
1296
1297 loadBigIntDigits(bigInt, digits);
1298
1299 #if JS_PUNBOX64
1300 // Load the first digit into the destination register.
1301 load64(Address(digits, 0), dest);
1302 #else
1303 // Load the first digit into the destination register's low value.
1304 load32(Address(digits, 0), dest.low);
1305
1306 // And conditionally load the second digit into the high value register.
1307 Label twoDigits, digitsDone;
1308 branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
1309 Imm32(1), &twoDigits);
1310 {
1311 move32(Imm32(0), dest.high);
1312 jump(&digitsDone);
1313 }
1314 {
1315 bind(&twoDigits);
1316 load32(Address(digits, sizeof(BigInt::Digit)), dest.high);
1317 }
1318 bind(&digitsDone);
1319 #endif
1320
1321 branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
1322 Imm32(BigInt::signBitMask()), &done);
1323 neg64(dest);
1324
1325 bind(&done);
1326 }
1327
// Load |bigInt|'s first (least significant) digit into |dest|, or 0 when the
// BigInt has no digits (i.e. is zero). The digit is an absolute value; the
// sign bit is not applied here.
void MacroAssembler::loadFirstBigIntDigitOrZero(Register bigInt,
                                                Register dest) {
  Label done, nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    movePtr(ImmWord(0), dest);
    jump(&done);
  }
  bind(&nonZero);

  loadBigIntDigits(bigInt, dest);

  // Load the first digit into the destination register.
  loadPtr(Address(dest, 0), dest);

  bind(&done);
}
1345
// Load |bigInt|'s value as a signed intptr into |dest|, branching to |fail|
// when it doesn't fit. Zero-valued BigInts load as 0 without failing.
void MacroAssembler::loadBigInt(Register bigInt, Register dest, Label* fail) {
  Label done, nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    movePtr(ImmWord(0), dest);
    jump(&done);
  }
  bind(&nonZero);

  loadBigIntNonZero(bigInt, dest, fail);

  bind(&done);
}
1359
// Load the non-zero BigInt |bigInt| as a signed intptr into |dest|. Branches
// to |fail| when the value needs more than one digit or the single digit
// doesn't fit in intptr_t.
void MacroAssembler::loadBigIntNonZero(Register bigInt, Register dest,
                                       Label* fail) {
  MOZ_ASSERT(bigInt != dest);

#ifdef DEBUG
  // Callers must have excluded the zero case already.
  Label nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  assumeUnreachable("Unexpected zero BigInt");
  bind(&nonZero);
#endif

  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), fail);

  static_assert(BigInt::inlineDigitsLength() > 0,
                "Single digit BigInts use inline storage");

  // Load the first inline digit into the destination register.
  loadPtr(Address(bigInt, BigInt::offsetOfInlineDigits()), dest);

  // Return as a signed pointer.
  bigIntDigitToSignedPtr(bigInt, dest, fail);
}
1383
// Convert the absolute-valued digit in |digit| to a signed intptr, using
// |bigInt|'s sign bit. Branches to |fail| when the digit's top bit is set
// (the magnitude doesn't fit in a signed intptr_t).
void MacroAssembler::bigIntDigitToSignedPtr(Register bigInt, Register digit,
                                            Label* fail) {
  // BigInt digits are stored as absolute numbers. Take the failure path when
  // the digit can't be stored in intptr_t.
  branchTestPtr(Assembler::Signed, digit, digit, fail);

  // Negate |digit| when the BigInt is negative.
  Label nonNegative;
  branchIfBigIntIsNonNegative(bigInt, &nonNegative);
  negPtr(digit);
  bind(&nonNegative);
}
1396
// Load |bigInt|'s absolute value into |dest|, branching to |fail| when more
// than one digit would be needed. A zero-length BigInt loads as 0.
void MacroAssembler::loadBigIntAbsolute(Register bigInt, Register dest,
                                        Label* fail) {
  MOZ_ASSERT(bigInt != dest);

  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), fail);

  static_assert(BigInt::inlineDigitsLength() > 0,
                "Single digit BigInts use inline storage");

  // Load the first inline digit into the destination register, keeping the
  // preloaded 0 when the BigInt has no digits at all.
  movePtr(ImmWord(0), dest);
  cmp32LoadPtr(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
               Imm32(0), Address(bigInt, BigInt::offsetOfInlineDigits()), dest);
}
1412
// Initialize the freshly allocated |bigInt| from the 64-bit value |val|,
// interpreted as signed for Scalar::BigInt64 and unsigned otherwise.
// Clobbers |val| (it may be negated in place).
void MacroAssembler::initializeBigInt64(Scalar::Type type, Register bigInt,
                                        Register64 val) {
  MOZ_ASSERT(Scalar::isBigIntType(type));

  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  // A zero value is represented with digit length 0.
  Label done, nonZero;
  branch64(Assembler::NotEqual, val, Imm64(0), &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  if (type == Scalar::BigInt64) {
    // Set the sign-bit for negative values and then continue with the two's
    // complement.
    Label isPositive;
    branch64(Assembler::GreaterThan, val, Imm64(0), &isPositive);
    {
      store32(Imm32(BigInt::signBitMask()),
              Address(bigInt, BigInt::offsetOfFlags()));
      neg64(val);
    }
    bind(&isPositive);
  }

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t, so there's a single "
                "store on 64-bit and up to two stores on 32-bit");

#ifndef JS_PUNBOX64
  // On 32-bit, bump the digit length to 2 when the high word is non-zero.
  Label singleDigit;
  branchTest32(Assembler::Zero, val.high, val.high, &singleDigit);
  store32(Imm32(2), Address(bigInt, BigInt::offsetOfLength()));
  bind(&singleDigit);

  // We can perform a single store64 on 32-bit platforms, because inline
  // storage can store at least two 32-bit integers.
  static_assert(BigInt::inlineDigitsLength() >= 2,
                "BigInt inline storage can store at least two digits");
#endif

  store64(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}
1462
// Initialize the freshly allocated |bigInt| from the signed intptr value in
// |val|. Clobbers |val| (negative values are negated in place to get the
// stored absolute digit).
void MacroAssembler::initializeBigInt(Register bigInt, Register val) {
  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  // A zero value is represented with digit length 0.
  Label done, nonZero;
  branchTestPtr(Assembler::NonZero, val, val, &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  // Set the sign-bit for negative values and then continue with the two's
  // complement.
  Label isPositive;
  branchTestPtr(Assembler::NotSigned, val, val, &isPositive);
  {
    store32(Imm32(BigInt::signBitMask()),
            Address(bigInt, BigInt::offsetOfFlags()));
    negPtr(val);
  }
  bind(&isPositive);

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t");

  storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}
1494
// Initialize the freshly allocated |bigInt| from the unsigned intptr value
// in |val| (already an absolute magnitude; the sign bit is left clear).
void MacroAssembler::initializeBigIntAbsolute(Register bigInt, Register val) {
  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  // A zero value is represented with digit length 0.
  Label done, nonZero;
  branchTestPtr(Assembler::NonZero, val, val, &nonZero);
  {
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t");

  storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}
1515
// Allocate a new BigInt in |dest| and copy |src|'s sign, length, and inline
// digits into it. Branches to |fail| when |src| uses heap digits or when
// allocation fails. |temp| is a scratch register.
void MacroAssembler::copyBigIntWithInlineDigits(Register src, Register dest,
                                                Register temp, Label* fail,
                                                bool attemptNursery) {
  branch32(Assembler::Above, Address(src, BigInt::offsetOfLength()),
           Imm32(int32_t(BigInt::inlineDigitsLength())), fail);

  newGCBigInt(dest, temp, fail, attemptNursery);

  // Copy the sign-bit, but not any of the other bits used by the GC.
  load32(Address(src, BigInt::offsetOfFlags()), temp);
  and32(Imm32(BigInt::signBitMask()), temp);
  store32(temp, Address(dest, BigInt::offsetOfFlags()));

  // Copy the length.
  load32(Address(src, BigInt::offsetOfLength()), temp);
  store32(temp, Address(dest, BigInt::offsetOfLength()));

  // Copy the digits. All inline digits are copied unconditionally; the
  // length field determines how many are meaningful.
  Address srcDigits(src, js::BigInt::offsetOfInlineDigits());
  Address destDigits(dest, js::BigInt::offsetOfInlineDigits());

  for (size_t i = 0; i < BigInt::inlineDigitsLength(); i++) {
    static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                  "BigInt Digit size matches uintptr_t");

    loadPtr(srcDigits, temp);
    storePtr(temp, destDigits);

    srcDigits = Address(src, srcDigits.offset + sizeof(BigInt::Digit));
    destDigits = Address(dest, destDigits.offset + sizeof(BigInt::Digit));
  }
}
1548
// Compare |bigInt| against the int32 in |int32| per |op| and jump to
// |ifTrue| or |ifFalse| accordingly. Only loose-equality and relational ops
// are supported. |scratch1| and |scratch2| are clobbered; |int32| is not.
void MacroAssembler::compareBigIntAndInt32(JSOp op, Register bigInt,
                                           Register int32, Register scratch1,
                                           Register scratch2, Label* ifTrue,
                                           Label* ifFalse) {
  MOZ_ASSERT(IsLooseEqualityOp(op) || IsRelationalOp(op));

  static_assert(std::is_same_v<BigInt::Digit, uintptr_t>,
                "BigInt digit can be loaded in a pointer-sized register");
  static_assert(sizeof(BigInt::Digit) >= sizeof(uint32_t),
                "BigInt digit stores at least an uint32");

  // Test for too large numbers.
  //
  // If the absolute value of the BigInt can't be expressed in an uint32/uint64,
  // the result of the comparison is a constant.
  if (op == JSOp::Eq || op == JSOp::Ne) {
    Label* tooLarge = op == JSOp::Eq ? ifFalse : ifTrue;
    branch32(Assembler::GreaterThan,
             Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
             tooLarge);
  } else {
    Label doCompare;
    branch32(Assembler::LessThanOrEqual,
             Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
             &doCompare);

    // Still need to take the sign-bit into account for relational operations.
    if (op == JSOp::Lt || op == JSOp::Le) {
      branchIfBigIntIsNegative(bigInt, ifTrue);
      jump(ifFalse);
    } else {
      branchIfBigIntIsNegative(bigInt, ifFalse);
      jump(ifTrue);
    }

    bind(&doCompare);
  }

  // Test for mismatched signs and, if the signs are equal, load |abs(x)| in
  // |scratch1| and |abs(y)| in |scratch2| and then compare the absolute numbers
  // against each other.
  {
    // Jump to |ifTrue| resp. |ifFalse| if the BigInt is strictly less than
    // resp. strictly greater than the int32 value, depending on the comparison
    // operator.
    Label* greaterThan;
    Label* lessThan;
    if (op == JSOp::Eq) {
      greaterThan = ifFalse;
      lessThan = ifFalse;
    } else if (op == JSOp::Ne) {
      greaterThan = ifTrue;
      lessThan = ifTrue;
    } else if (op == JSOp::Lt || op == JSOp::Le) {
      greaterThan = ifFalse;
      lessThan = ifTrue;
    } else {
      MOZ_ASSERT(op == JSOp::Gt || op == JSOp::Ge);
      greaterThan = ifTrue;
      lessThan = ifFalse;
    }

    // BigInt digits are always stored as an absolute number.
    loadFirstBigIntDigitOrZero(bigInt, scratch1);

    // Load the int32 into |scratch2| and negate it for negative numbers.
    move32(int32, scratch2);

    Label isNegative, doCompare;
    branchIfBigIntIsNegative(bigInt, &isNegative);
    // Non-negative BigInt vs. negative int32: BigInt is greater.
    branch32(Assembler::LessThan, int32, Imm32(0), greaterThan);
    jump(&doCompare);

    // We rely on |neg32(INT32_MIN)| staying INT32_MIN, because we're using an
    // unsigned comparison below.
    bind(&isNegative);
    // Negative BigInt vs. non-negative int32: BigInt is less.
    branch32(Assembler::GreaterThanOrEqual, int32, Imm32(0), lessThan);
    neg32(scratch2);

    // Not all supported platforms (e.g. MIPS64) zero-extend 32-bit operations,
    // so we need to explicitly clear any high 32-bits.
    move32ZeroExtendToPtr(scratch2, scratch2);

    // Reverse the relational comparator for negative numbers.
    // |-x < -y| <=> |+x > +y|.
    // |-x ≤ -y| <=> |+x ≥ +y|.
    // |-x > -y| <=> |+x < +y|.
    // |-x ≥ -y| <=> |+x ≤ +y|.
    JSOp reversed = ReverseCompareOp(op);
    if (reversed != op) {
      branchPtr(JSOpToCondition(reversed, /* isSigned = */ false), scratch1,
                scratch2, ifTrue);
      jump(ifFalse);
    }

    bind(&doCompare);
    branchPtr(JSOpToCondition(op, /* isSigned = */ false), scratch1, scratch2,
              ifTrue);
  }
}
1649
// Emit the object path of the |typeof| operator: decide among "object",
// "function" and "undefined" for the object in |obj|, jumping to the
// corresponding out-label. Proxies are deferred to |slow| because their
// callability and emulates-undefined behavior can depend on hooks.
// Clobbers |scratch|.
void MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
                                  Label* isObject, Label* isCallable,
                                  Label* isUndefined) {
  loadObjClassUnsafe(obj, scratch);

  // Proxies can emulate undefined and have complex isCallable behavior.
  branchTestClassIsProxy(true, scratch, slow);

  // JSFunctions are always callable.
  branchPtr(Assembler::Equal, scratch, ImmPtr(&JSFunction::class_), isCallable);

  // Objects that emulate undefined.
  Address flags(scratch, JSClass::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED),
               isUndefined);

  // Handle classes with a call hook.
  // No cOps table at all means no call hook, hence "object".
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), isObject);

  // cOps is present: a null |call| member still means "object", a non-null
  // one means "function".
  loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClassOps, call)),
            ImmPtr(nullptr), isObject);

  jump(isCallable);
}
1676
// Compute a 0/1 boolean in |output|: whether |obj| is callable (when
// |isCallable| is true) or a constructor (when false). Proxies jump to
// |isProxy| since their answer requires VM involvement. |obj| is preserved;
// |output| is clobbered (it doubles as a scratch for the class pointer).
void MacroAssembler::isCallableOrConstructor(bool isCallable, Register obj,
                                             Register output, Label* isProxy) {
  MOZ_ASSERT(obj != output);

  Label notFunction, hasCOps, done;
  loadObjClassUnsafe(obj, output);

  // An object is callable iff:
  //   is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
  // An object is constructor iff:
  //  ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
  //   (getClass()->cOps && getClass()->cOps->construct)).
  branchPtr(Assembler::NotEqual, output, ImmPtr(&JSFunction::class_),
            &notFunction);
  if (isCallable) {
    // All JSFunctions are callable.
    move32(Imm32(1), output);
  } else {
    // Only some JSFunctions are constructors: extract the CONSTRUCTOR flag
    // bit from the function's flags word and normalize it to 0/1.
    static_assert(mozilla::IsPowerOfTwo(uint32_t(FunctionFlags::CONSTRUCTOR)),
                  "FunctionFlags::CONSTRUCTOR has only one bit set");

    load16ZeroExtend(Address(obj, JSFunction::offsetOfFlags()), output);
    rshift32(Imm32(mozilla::FloorLog2(uint32_t(FunctionFlags::CONSTRUCTOR))),
             output);
    and32(Imm32(1), output);
  }
  jump(&done);

  bind(&notFunction);

  // Just skim proxies off. Their notion of isCallable()/isConstructor() is
  // more complicated.
  branchTestClassIsProxy(true, output, isProxy);

  // Non-function, non-proxy: the answer comes from the class's cOps table.
  branchPtr(Assembler::NonZero, Address(output, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), &hasCOps);
  move32(Imm32(0), output);
  jump(&done);

  bind(&hasCOps);
  loadPtr(Address(output, offsetof(JSClass, cOps)), output);
  size_t opsOffset =
      isCallable ? offsetof(JSClassOps, call) : offsetof(JSClassOps, construct);
  cmpPtrSet(Assembler::NonZero, Address(output, opsOffset), ImmPtr(nullptr),
            output);

  bind(&done);
}
1724
loadJSContext(Register dest)1725 void MacroAssembler::loadJSContext(Register dest) {
1726 JitContext* jcx = GetJitContext();
1727 movePtr(ImmPtr(jcx->runtime->mainContextPtr()), dest);
1728 }
1729
ContextRealmPtr()1730 static const uint8_t* ContextRealmPtr() {
1731 return (
1732 static_cast<const uint8_t*>(GetJitContext()->runtime->mainContextPtr()) +
1733 JSContext::offsetOfRealm());
1734 }
1735
// Make |realm| the context's current realm by storing it directly into
// cx->realm_.
void MacroAssembler::switchToRealm(Register realm) {
  storePtr(realm, AbsoluteAddress(ContextRealmPtr()));
}
1739
// Switch to a realm known at compile time. |realm| must be non-null.
// Clobbers |scratch|.
void MacroAssembler::switchToRealm(const void* realm, Register scratch) {
  MOZ_ASSERT(realm);

  movePtr(ImmPtr(realm), scratch);
  switchToRealm(scratch);
}
1746
// Switch the context to |obj|'s realm, read via the object's shape ->
// base shape -> realm chain. Clobbers |scratch| (|obj| may alias it).
void MacroAssembler::switchToObjectRealm(Register obj, Register scratch) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  switchToRealm(scratch);
}
1753
// Switch to the realm of the current baseline frame, derived from the
// frame's environment-chain object. Clobbers |scratch|.
void MacroAssembler::switchToBaselineFrameRealm(Register scratch) {
  Address envChain(BaselineFrameReg,
                   BaselineFrame::reverseOffsetOfEnvironmentChain());
  loadPtr(envChain, scratch);
  switchToObjectRealm(scratch, scratch);
}
1760
// Switch the context to the realm recorded in the wasm TLS data. Both the
// context and realm pointers come from WasmTlsReg, so this works regardless
// of which realm is currently entered. Clobbers both scratch registers.
void MacroAssembler::switchToWasmTlsRealm(Register scratch1,
                                          Register scratch2) {
  loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), scratch1);
  loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, realm)), scratch2);
  storePtr(scratch2, Address(scratch1, JSContext::offsetOfRealm()));
}
1767
// Debug-only check that cx->realm equals |realm|; emits nothing in
// non-DEBUG builds. Clobbers |scratch| (DEBUG builds only).
void MacroAssembler::debugAssertContextRealm(const void* realm,
                                             Register scratch) {
#ifdef DEBUG
  Label ok;
  movePtr(ImmPtr(realm), scratch);
  branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr()), scratch, &ok);
  assumeUnreachable("Unexpected context realm");
  bind(&ok);
#endif
}
1778
// Set |output| to 1 iff |obj| is the Array constructor of a realm different
// from the current one, else 0. |obj| must not be a proxy (asserted in
// DEBUG). Clobbers |output| and possibly |obj| (via branchTestObjClass).
void MacroAssembler::setIsCrossRealmArrayConstructor(Register obj,
                                                     Register output) {
#ifdef DEBUG
  Label notProxy;
  branchTestObjectIsProxy(false, obj, output, &notProxy);
  assumeUnreachable("Unexpected proxy in setIsCrossRealmArrayConstructor");
  bind(&notProxy);
#endif

  // The object's realm must not be cx->realm.
  Label isFalse, done;
  loadPtr(Address(obj, JSObject::offsetOfShape()), output);
  loadPtr(Address(output, Shape::offsetOfBaseShape()), output);
  loadPtr(Address(output, BaseShape::offsetOfRealm()), output);
  branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr()), output,
            &isFalse);

  // The object must be a function.
  branchTestObjClass(Assembler::NotEqual, obj, &JSFunction::class_, output, obj,
                     &isFalse);

  // The function must be the ArrayConstructor native.
  branchPtr(Assembler::NotEqual,
            Address(obj, JSFunction::offsetOfNativeOrEnv()),
            ImmPtr(js::ArrayConstructor), &isFalse);

  move32(Imm32(1), output);
  jump(&done);

  bind(&isFalse);
  move32(Imm32(0), output);

  bind(&done);
}
1813
// Set |output| to 1 iff |obj| is one of the TypedArray constructor natives
// (from any realm), else 0. Wrappers are not supported, so a wrapped
// constructor yields 0. Clobbers |output| and possibly |obj| (via
// branchTestObjClass).
void MacroAssembler::setIsDefinitelyTypedArrayConstructor(Register obj,
                                                          Register output) {
  Label isFalse, isTrue, done;

  // The object must be a function. (Wrappers are not supported.)
  branchTestObjClass(Assembler::NotEqual, obj, &JSFunction::class_, output, obj,
                     &isFalse);

  // Load the native into |output|.
  loadPtr(Address(obj, JSFunction::offsetOfNativeOrEnv()), output);

  auto branchIsTypedArrayCtor = [&](Scalar::Type type) {
    // The function must be a TypedArrayConstructor native (from any realm).
    JSNative constructor = TypedArrayConstructorNative(type);
    branchPtr(Assembler::Equal, output, ImmPtr(constructor), &isTrue);
  };

  // Compare against the constructor native of every typed-array element type.
#define TYPED_ARRAY_CONSTRUCTOR_NATIVE(T, N) branchIsTypedArrayCtor(Scalar::N);
  JS_FOR_EACH_TYPED_ARRAY(TYPED_ARRAY_CONSTRUCTOR_NATIVE)
#undef TYPED_ARRAY_CONSTRUCTOR_NATIVE

  // Falls through to the false case.

  bind(&isFalse);
  move32(Imm32(0), output);
  jump(&done);

  bind(&isTrue);
  move32(Imm32(1), output);

  bind(&done);
}
1846
// Guard that the intptr value in |reg| fits in an int32. The value must
// already be non-negative (asserted in DEBUG builds). On 64-bit platforms,
// values above INT32_MAX branch to |fail|; on 32-bit platforms a
// non-negative intptr always fits, so no runtime check is emitted.
void MacroAssembler::guardNonNegativeIntPtrToInt32(Register reg, Label* fail) {
#ifdef DEBUG
  Label ok;
  branchPtr(Assembler::NotSigned, reg, reg, &ok);
  assumeUnreachable("Unexpected negative value");
  bind(&ok);
#endif

#ifdef JS_64BIT
  branchPtr(Assembler::Above, reg, Imm32(INT32_MAX), fail);
#endif
}
1859
// Load an ArrayBuffer's byte length (stored as a private intptr slot) into
// |output|.
void MacroAssembler::loadArrayBufferByteLengthIntPtr(Register obj,
                                                     Register output) {
  Address slotAddr(obj, ArrayBufferObject::offsetOfByteLengthSlot());
  loadPrivate(slotAddr, output);
}
1865
// Load an ArrayBufferView's byte offset (stored as a private intptr slot)
// into |output|.
void MacroAssembler::loadArrayBufferViewByteOffsetIntPtr(Register obj,
                                                         Register output) {
  Address slotAddr(obj, ArrayBufferViewObject::byteOffsetOffset());
  loadPrivate(slotAddr, output);
}
1871
// Load an ArrayBufferView's element length (stored as a private intptr
// slot) into |output|.
void MacroAssembler::loadArrayBufferViewLengthIntPtr(Register obj,
                                                     Register output) {
  Address slotAddr(obj, ArrayBufferViewObject::lengthOffset());
  loadPrivate(slotAddr, output);
}
1877
// Load a DOM proxy's expando Value into |output|, guarding that the proxy's
// private slot still holds the expected ExpandoAndGeneration* and that its
// generation counter equals |generation|; otherwise jump to |fail|.
// Clobbers |output| (including its scratch register).
void MacroAssembler::loadDOMExpandoValueGuardGeneration(
    Register obj, ValueOperand output,
    JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
    Label* fail) {
  loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
          output.scratchReg());
  loadValue(Address(output.scratchReg(),
                    js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
            output);

  // Guard the ExpandoAndGeneration* matches the proxy's ExpandoAndGeneration
  // privateSlot.
  branchTestValue(Assembler::NotEqual, output,
                  PrivateValue(expandoAndGeneration), fail);

  // Guard expandoAndGeneration->generation matches the expected generation.
  Address generationAddr(output.payloadOrValueReg(),
                         JS::ExpandoAndGeneration::offsetOfGeneration());
  branch64(Assembler::NotEqual, generationAddr, Imm64(generation), fail);

  // Load expandoAndGeneration->expando into the output Value register.
  loadValue(Address(output.payloadOrValueReg(),
                    JS::ExpandoAndGeneration::offsetOfExpando()),
            output);
}
1903
// Load the current JitActivation (cx->activation_) into |dest|.
void MacroAssembler::loadJitActivation(Register dest) {
  loadJSContext(dest);
  loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
}
1908
// Guard that the JSString in |str| equals the atom |atom|, jumping to
// |fail| otherwise. Fast paths: pointer identity, then atom-bit and length
// mismatches. Non-atomized strings of the right length fall back to an ABI
// call to EqualStringsHelperPure. |volatileRegs| must not contain |scratch|
// (asserted); |scratch| is clobbered.
void MacroAssembler::guardSpecificAtom(Register str, JSAtom* atom,
                                       Register scratch,
                                       const LiveRegisterSet& volatileRegs,
                                       Label* fail) {
  Label done;
  branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);

  // The pointers are not equal, so if the input string is also an atom it
  // must be a different string.
  branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
               Imm32(JSString::ATOM_BIT), fail);

  // Check the length.
  branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
           Imm32(atom->length()), fail);

  // We have a non-atomized string with the same length. Call a helper
  // function to do the comparison.
  PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSString * str1, JSString * str2);
  setupUnalignedABICall(scratch);
  movePtr(ImmGCPtr(atom), scratch);
  passABIArg(scratch);
  passABIArg(str);
  callWithABI<Fn, EqualStringsHelperPure>();
  // Keep the boolean result in |scratch| across the register restore.
  mov(ReturnReg, scratch);

  MOZ_ASSERT(!volatileRegs.has(scratch));
  PopRegsInMask(volatileRegs);
  branchIfFalseBool(scratch, fail);

  bind(&done);
}
1943
// Convert the JSString in |str| to an int32 in |output|, jumping to |fail|
// if the string does not represent an int32 (or on OOM in the helper).
// Fast path: strings with a cached index value. Slow path: ABI call to
// GetInt32FromStringPure writing through a stack slot. Clobbers |scratch|;
// |str| is preserved.
void MacroAssembler::guardStringToInt32(Register str, Register output,
                                        Register scratch,
                                        LiveRegisterSet volatileRegs,
                                        Label* fail) {
  Label vmCall, done;
  // Use indexed value as fast path if possible.
  loadStringIndexValue(str, output, &vmCall);
  jump(&done);
  {
    bind(&vmCall);

    // Reserve space for holding the result int32_t of the call. Use
    // pointer-size to avoid misaligning the stack on 64-bit platforms.
    reserveStack(sizeof(uintptr_t));
    moveStackPtrTo(output);

    volatileRegs.takeUnchecked(scratch);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);
    }
    PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext * cx, JSString * str, int32_t * result);
    setupUnalignedABICall(scratch);
    loadJSContext(scratch);
    passABIArg(scratch);
    passABIArg(str);
    passABIArg(output);
    callWithABI<Fn, GetInt32FromStringPure>();
    // Keep the success flag in |scratch| across the register restore.
    mov(ReturnReg, scratch);

    PopRegsInMask(volatileRegs);

    Label ok;
    branchIfTrueBool(scratch, &ok);
    {
      // OOM path, recovered by GetInt32FromStringPure.
      //
      // Use addToStackPtr instead of freeStack as freeStack tracks stack height
      // flow-insensitively, and using it twice would confuse the stack height
      // tracking.
      addToStackPtr(Imm32(sizeof(uintptr_t)));
      jump(fail);
    }
    bind(&ok);
    load32(Address(output, 0), output);
    freeStack(sizeof(uintptr_t));
  }
  bind(&done);
}
1994
// Emit the shared tail of an Ion bailout: on entry, ReturnReg holds the
// success flag of the preceding bailout call and |bailoutInfo| points to a
// BaselineBailoutInfo describing the baseline frames to reconstruct. On
// failure, control transfers to the exception label. On success the
// baseline stack is rebuilt, FinishBailoutToBaseline is called, and
// execution resumes in baseline code. Clobbers |scratch| and (internally)
// most general-purpose registers.
void MacroAssembler::generateBailoutTail(Register scratch,
                                         Register bailoutInfo) {
  loadJSContext(scratch);
  enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);

  branchIfFalseBool(ReturnReg, exceptionLabel());

  // Finish bailing out to Baseline.
  {
    // Prepare a register set for use in this case.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()),
                  !regs.has(AsRegister(getStackPointer())));
    regs.take(bailoutInfo);

    // Reset SP to the point where clobbering starts.
    loadStackPtr(
        Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)));

    Register copyCur = regs.takeAny();
    Register copyEnd = regs.takeAny();
    Register temp = regs.takeAny();

    // Copy data onto stack.
    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)),
            copyCur);
    loadPtr(
        Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)),
        copyEnd);
    {
      // Copy 4 bytes at a time from [copyEnd, copyCur) downward onto the
      // stack, stopping once copyCur reaches copyEnd.
      Label copyLoop;
      Label endOfCopy;
      bind(&copyLoop);
      branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
      subPtr(Imm32(4), copyCur);
      subFromStackPtr(Imm32(4));
      load32(Address(copyCur, 0), temp);
      store32(temp, Address(getStackPointer(), 0));
      jump(&copyLoop);
      bind(&endOfCopy);
    }

    // Enter exit frame for the FinishBailoutToBaseline call.
    load32(Address(bailoutInfo,
                   offsetof(BaselineBailoutInfo, frameSizeOfInnerMostFrame)),
           temp);
    makeFrameDescriptor(temp, FrameType::BaselineJS, ExitFrameLayout::Size());
    push(temp);
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
    // No GC things to mark on the stack, push a bare token.
    loadJSContext(scratch);
    enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);

    // Save needed values onto stack temporarily.
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));

    // Call a stub to free allocated memory and create arguments objects.
    using Fn = bool (*)(BaselineBailoutInfo * bailoutInfoArg);
    setupUnalignedABICall(temp);
    passABIArg(bailoutInfo);
    callWithABI<Fn, FinishBailoutToBaseline>(
        MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
    branchIfFalseBool(ReturnReg, exceptionLabel());

    // Restore values where they need to be and resume execution.
    AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
    enterRegs.take(BaselineFrameReg);
    Register jitcodeReg = enterRegs.takeAny();

    // Pop the resume address and frame pointer pushed above (reverse order).
    pop(jitcodeReg);
    pop(BaselineFrameReg);

    // Discard exit frame.
    addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));

    jump(jitcodeReg);
  }
}
2074
// Debug-only check that |frameType| is one of the frame types allowed to
// precede a rectifier frame; emits nothing in non-DEBUG builds.
void MacroAssembler::assertRectifierFrameParentType(Register frameType) {
#ifdef DEBUG
  {
    // Check the possible previous frame types here.
    Label checkOk;
    branch32(Assembler::Equal, frameType, Imm32(FrameType::IonJS), &checkOk);
    branch32(Assembler::Equal, frameType, Imm32(FrameType::BaselineStub),
             &checkOk);
    branch32(Assembler::Equal, frameType, Imm32(FrameType::WasmToJSJit),
             &checkOk);
    branch32(Assembler::Equal, frameType, Imm32(FrameType::CppToJSJit),
             &checkOk);
    assumeUnreachable("Unrecognized frame type preceding RectifierFrame.");
    bind(&checkOk);
  }
#endif
}
2092
// Load the raw JIT entry point of the JSFunction in |func| into |dest|,
// going through func->script->jitCodeRaw_. The static_assert ensures the
// same load works whether the function has a BaseScript or a
// SelfHostedLazyScript.
void MacroAssembler::loadJitCodeRaw(Register func, Register dest) {
  static_assert(BaseScript::offsetOfJitCodeRaw() ==
                    SelfHostedLazyScript::offsetOfJitCodeRaw(),
                "SelfHostedLazyScript and BaseScript must use same layout for "
                "jitCodeRaw_");
  loadPtr(Address(func, JSFunction::offsetOfScript()), dest);
  loadPtr(Address(dest, BaseScript::offsetOfJitCodeRaw()), dest);
}
2101
// Load the baseline jitcode entry of the JSFunction in |func| into |dest|.
// If |failure| is non-null, branches there when the function's script has
// no JitScript or baseline compilation is disabled/absent; if null, the
// caller guarantees the baseline script exists.
void MacroAssembler::loadBaselineJitCodeRaw(Register func, Register dest,
                                            Label* failure) {
  // Load JitScript
  loadPtr(Address(func, JSFunction::offsetOfScript()), dest);
  if (failure) {
    branchIfScriptHasNoJitScript(dest, failure);
  }
  loadJitScript(dest, dest);

  // Load BaselineScript
  loadPtr(Address(dest, JitScript::offsetOfBaselineScript()), dest);
  if (failure) {
    // The slot holds nullptr (0x0) or BaselineDisabledScript (0x1) when no
    // usable BaselineScript exists; a single unsigned compare covers both.
    static_assert(BaselineDisabledScript == 0x1);
    branchPtr(Assembler::BelowOrEqual, dest, ImmWord(BaselineDisabledScript),
              failure);
  }

  // Load Baseline jitcode
  loadPtr(Address(dest, BaselineScript::offsetOfMethod()), dest);
  loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
}
2123
// Compute the BaselineFrame* from the raw frame pointer |framePtr|: the
// frame structure lives BaselineFrame::Size() bytes below it. |framePtr|
// and |dest| may be the same register.
void MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest) {
  if (framePtr != dest) {
    movePtr(framePtr, dest);
  }
  subPtr(Imm32(BaselineFrame::Size()), dest);
}
2130
ContextInlinedICScriptPtr()2131 static const uint8_t* ContextInlinedICScriptPtr() {
2132 return (
2133 static_cast<const uint8_t*>(GetJitContext()->runtime->mainContextPtr()) +
2134 JSContext::offsetOfInlinedICScript());
2135 }
2136
// Store |icScript| into cx->inlinedICScript_ so the callee can find the
// inlined ICScript.
void MacroAssembler::storeICScriptInJSContext(Register icScript) {
  storePtr(icScript, AbsoluteAddress(ContextInlinedICScriptPtr()));
}
2140
// Jump to the runtime's exception-tail trampoline to propagate a pending
// exception.
void MacroAssembler::handleFailure() {
  // Re-entry code is irrelevant because the exception will leave the
  // running function and never come back
  TrampolinePtr excTail =
      GetJitContext()->runtime->jitRuntime()->getExceptionTail();
  jump(excTail);
}
2148
// Emit code that reports |output| (via the AssumeUnreachable runtime
// helper, JS_MASM_VERBOSE builds only) and then traps with a breakpoint.
// All volatile registers are preserved around the helper call.
void MacroAssembler::assumeUnreachable(const char* output) {
#ifdef JS_MASM_VERBOSE
  if (!IsCompilingWasm()) {
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    Register temp = regs.takeAnyGeneral();

    using Fn = void (*)(const char* output);
    setupUnalignedABICall(temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    callWithABI<Fn, AssumeUnreachable>(MoveOp::GENERAL,
                                       CheckUnsafeCallWithABI::DontCheckOther);

    PopRegsInMask(save);
  }
#endif

  breakpoint();
}
2170
// Emit code that prints the constant string |output| via the Printf0
// runtime helper. Compiles to nothing unless JS_MASM_VERBOSE is defined.
// All volatile registers are preserved around the call.
void MacroAssembler::printf(const char* output) {
#ifdef JS_MASM_VERBOSE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(const char* output);
  setupUnalignedABICall(temp);
  movePtr(ImmPtr(output), temp);
  passABIArg(temp);
  callWithABI<Fn, Printf0>();

  PopRegsInMask(save);
#endif
}
2188
// Emit code that prints the format string |output| with the runtime value
// of |value| via the Printf1 helper. Compiles to nothing unless
// JS_MASM_VERBOSE is defined. All volatile registers are preserved.
void MacroAssembler::printf(const char* output, Register value) {
#ifdef JS_MASM_VERBOSE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  // Keep |value| live: it must not be picked as the temp below.
  regs.takeUnchecked(value);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(const char* output, uintptr_t value);
  setupUnalignedABICall(temp);
  movePtr(ImmPtr(output), temp);
  passABIArg(temp);
  passABIArg(value);
  callWithABI<Fn, Printf1>();

  PopRegsInMask(save);
#endif
}
2209
2210 #ifdef JS_TRACE_LOGGING
// Load the current thread's TraceLoggerThread (cx->traceLogger) into
// |logger|.
void MacroAssembler::loadTraceLogger(Register logger) {
  loadJSContext(logger);
  loadPtr(Address(logger, offsetof(JSContext, traceLogger)), logger);
}
2215
// Emit a call logging the start of the compile-time-constant event |textId|
// on |logger|. Emits nothing when the text id is disabled, unless |force|.
// All volatile registers are preserved around the call.
void MacroAssembler::tracelogStartId(Register logger, uint32_t textId,
                                     bool force) {
  if (!force && !TraceLogTextIdEnabled(textId)) {
    return;
  }

  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);
  // Keep |logger| live: it must not be picked as the temp below.
  regs.takeUnchecked(logger);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(TraceLoggerThread * logger, uint32_t id);
  setupUnalignedABICall(temp);
  passABIArg(logger);
  move32(Imm32(textId), temp);
  passABIArg(temp);
  callWithABI<Fn, TraceLogStartEventPrivate>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);

  PopRegsInMask(save);
}
2239
// Emit a call logging the start of the runtime event id in |textId| on
// |logger|. All volatile registers are preserved around the call.
void MacroAssembler::tracelogStartId(Register logger, Register textId) {
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);
  // Keep the argument registers live: neither may become the temp below.
  regs.takeUnchecked(logger);
  regs.takeUnchecked(textId);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(TraceLoggerThread * logger, uint32_t id);
  setupUnalignedABICall(temp);
  passABIArg(logger);
  passABIArg(textId);
  callWithABI<Fn, TraceLogStartEventPrivate>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);

  PopRegsInMask(save);
}
2258
// Emit a call logging the start of the TraceLoggerEvent pointed to by
// |event| on |logger|. All volatile registers are preserved around the
// call.
void MacroAssembler::tracelogStartEvent(Register logger, Register event) {
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);
  // Keep the argument registers live: neither may become the temp below.
  regs.takeUnchecked(logger);
  regs.takeUnchecked(event);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(TraceLoggerThread*, const TraceLoggerEvent&);
  setupUnalignedABICall(temp);
  passABIArg(logger);
  passABIArg(event);
  callWithABI<Fn, TraceLogStartEvent>(MoveOp::GENERAL,
                                      CheckUnsafeCallWithABI::DontCheckOther);

  PopRegsInMask(save);
}
2277
// Emit a call logging the end of the compile-time-constant event |textId|
// on |logger|. Emits nothing when the text id is disabled, unless |force|.
// All volatile registers are preserved around the call.
void MacroAssembler::tracelogStopId(Register logger, uint32_t textId,
                                    bool force) {
  if (!force && !TraceLogTextIdEnabled(textId)) {
    return;
  }

  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);
  // Keep |logger| live: it must not be picked as the temp below.
  regs.takeUnchecked(logger);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(TraceLoggerThread * logger, uint32_t id);
  setupUnalignedABICall(temp);
  passABIArg(logger);
  move32(Imm32(textId), temp);
  passABIArg(temp);

  callWithABI<Fn, TraceLogStopEventPrivate>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);

  PopRegsInMask(save);
}
2302
// Emit a call logging the end of the runtime event id in |textId| on
// |logger|. All volatile registers are preserved around the call.
void MacroAssembler::tracelogStopId(Register logger, Register textId) {
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);
  // Keep the argument registers live: neither may become the temp below.
  regs.takeUnchecked(logger);
  regs.takeUnchecked(textId);

  Register temp = regs.takeAnyGeneral();

  using Fn = void (*)(TraceLoggerThread * logger, uint32_t id);
  setupUnalignedABICall(temp);
  passABIArg(logger);
  passABIArg(textId);
  callWithABI<Fn, TraceLogStopEventPrivate>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);

  PopRegsInMask(save);
}
2321 #endif
2322
// If |val| holds an Int32 value, convert it in place into a boxed Double.
// Values of any other type are left untouched. Uses |val|'s scratch
// register and the scratch double register.
void MacroAssembler::convertInt32ValueToDouble(ValueOperand val) {
  Label done;
  branchTestInt32(Assembler::NotEqual, val, &done);
  unboxInt32(val, val.scratchReg());
  ScratchDoubleScope fpscratch(*this);
  convertInt32ToDouble(val.scratchReg(), fpscratch);
  boxDouble(fpscratch, val, fpscratch);
  bind(&done);
}
2332
// Convert the Value in |value| to a double or float32 (per |outputType|) in
// |output|, branching to |fail| for types with no numeric coercion handled
// here (objects, strings, symbols, bigints). Handles double, int32,
// boolean, null (-> 0) and undefined (-> NaN).
void MacroAssembler::convertValueToFloatingPoint(ValueOperand value,
                                                 FloatRegister output,
                                                 Label* fail,
                                                 MIRType outputType) {
  Label isDouble, isInt32, isBool, isNull, done;

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestDouble(Assembler::Equal, tag, &isDouble);
    branchTestInt32(Assembler::Equal, tag, &isInt32);
    branchTestBoolean(Assembler::Equal, tag, &isBool);
    branchTestNull(Assembler::Equal, tag, &isNull);
    branchTestUndefined(Assembler::NotEqual, tag, fail);
  }

  // fall-through: undefined
  loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output,
                            outputType);
  jump(&done);

  bind(&isNull);
  loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
  jump(&done);

  bind(&isBool);
  boolValueToFloatingPoint(value, output, outputType);
  jump(&done);

  bind(&isInt32);
  int32ValueToFloatingPoint(value, output, outputType);
  jump(&done);

  // On some non-multiAlias platforms, unboxDouble may use the scratch register,
  // so do not merge code paths here.
  bind(&isDouble);
  if (outputType == MIRType::Float32 && hasMultiAlias()) {
    ScratchDoubleScope tmp(*this);
    unboxDouble(value, tmp);
    convertDoubleToFloat32(tmp, output);
  } else {
    FloatRegister tmp = output.asDouble();
    unboxDouble(value, tmp);
    if (outputType == MIRType::Float32) {
      convertDoubleToFloat32(tmp, output);
    }
  }

  bind(&done);
}
2384
// Slow path of double/float32 -> int32 truncation: call the C++ ToInt32
// helper (or the wasm builtin when |compilingWasm|) and store the result in
// |dest|. |widenFloatToDouble| indicates |src| holds a float32 that must
// first be widened; how the widened value is materialized is
// platform-specific (scratch double on ARM/MIPS, a stack spill on x86/x64).
void MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest,
                                           bool widenFloatToDouble,
                                           bool compilingWasm,
                                           wasm::BytecodeOffset callOffset) {
  if (compilingWasm) {
    // Preserve the wasm TLS register across the builtin call.
    Push(WasmTlsReg);
  }
  int32_t framePushedAfterTls = framePushed();

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
  ScratchDoubleScope fpscratch(*this);
  if (widenFloatToDouble) {
    convertFloat32ToDouble(src, fpscratch);
    src = fpscratch;
  }
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  FloatRegister srcSingle;
  if (widenFloatToDouble) {
    MOZ_ASSERT(src.isSingle());
    // Spill the single and reuse its register as a double; restored below.
    srcSingle = src;
    src = src.asDouble();
    Push(srcSingle);
    convertFloat32ToDouble(srcSingle, src);
  }
#else
  // Also see below
  MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif

  MOZ_ASSERT(src.isDouble());

  if (compilingWasm) {
    int32_t tlsOffset = framePushed() - framePushedAfterTls;
    setupWasmABICall();
    passABIArg(src, MoveOp::DOUBLE);
    callWithABI(callOffset, wasm::SymbolicAddress::ToInt32,
                mozilla::Some(tlsOffset));
  } else {
    using Fn = int32_t (*)(double);
    setupUnalignedABICall(dest);
    passABIArg(src, MoveOp::DOUBLE);
    callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
                                 CheckUnsafeCallWithABI::DontCheckOther);
  }
  storeCallInt32Result(dest);

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
  // Nothing
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  if (widenFloatToDouble) {
    Pop(srcSingle);
  }
#else
  MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif

  if (compilingWasm) {
    Pop(WasmTlsReg);
  }
}
2447
// Convert the double in |src| to an int in |output| according to
// |behavior|: exact int32 conversion (optionally rejecting -0), JS
// modulo-2^32 truncation, non-wrapping truncation, or uint8 clamping.
// Failure branches go to |fail| (or |truncateFail| for the truncating
// variants, when provided). |temp| is only used by the ClampToUint8 path.
void MacroAssembler::convertDoubleToInt(FloatRegister src, Register output,
                                        FloatRegister temp, Label* truncateFail,
                                        Label* fail,
                                        IntConversionBehavior behavior) {
  switch (behavior) {
    case IntConversionBehavior::Normal:
    case IntConversionBehavior::NegativeZeroCheck:
      convertDoubleToInt32(
          src, output, fail,
          behavior == IntConversionBehavior::NegativeZeroCheck);
      break;
    case IntConversionBehavior::Truncate:
      branchTruncateDoubleMaybeModUint32(src, output,
                                         truncateFail ? truncateFail : fail);
      break;
    case IntConversionBehavior::TruncateNoWrap:
      branchTruncateDoubleToInt32(src, output,
                                  truncateFail ? truncateFail : fail);
      break;
    case IntConversionBehavior::ClampToUint8:
      // Clamping clobbers the input register, so use a temp.
      if (src != temp) {
        moveDouble(src, temp);
      }
      clampDoubleToUint8(temp, output);
      break;
  }
}
2476
// Convert a boxed Value to an int32 in |output| according to |behavior|.
// Int32, boolean, double, and (for truncating behaviors under
// IntConversionInputKind::Any) null/undefined/string inputs are handled
// inline; anything else jumps to |fail|. String-to-number conversion is
// delegated to out-of-line code via |handleStringEntry|, which re-enters at
// |handleStringRejoin| with the double result in |temp|. Slow-path double
// truncation is delegated to |truncateDoubleSlow|. Clobbers |temp| and (for
// strings) |stringReg|.
void MacroAssembler::convertValueToInt(
    ValueOperand value, Label* handleStringEntry, Label* handleStringRejoin,
    Label* truncateDoubleSlow, Register stringReg, FloatRegister temp,
    Register output, Label* fail, IntConversionBehavior behavior,
    IntConversionInputKind conversion) {
  Label done, isInt32, isBool, isDouble, isNull, isString;

  // Strings can only be handled for truncating/clamping behaviors, and only
  // when the caller supplied the out-of-line string-conversion labels.
  bool handleStrings = (behavior == IntConversionBehavior::Truncate ||
                        behavior == IntConversionBehavior::ClampToUint8) &&
                       handleStringEntry && handleStringRejoin;

  MOZ_ASSERT_IF(handleStrings, conversion == IntConversionInputKind::Any);

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestInt32(Equal, tag, &isInt32);
    if (conversion == IntConversionInputKind::Any ||
        conversion == IntConversionInputKind::NumbersOrBoolsOnly) {
      branchTestBoolean(Equal, tag, &isBool);
    }
    branchTestDouble(Equal, tag, &isDouble);

    if (conversion == IntConversionInputKind::Any) {
      // If we are not truncating, we fail for anything that's not
      // null. Otherwise we might be able to handle strings and undefined.
      switch (behavior) {
        case IntConversionBehavior::Normal:
        case IntConversionBehavior::NegativeZeroCheck:
          branchTestNull(Assembler::NotEqual, tag, fail);
          break;

        case IntConversionBehavior::Truncate:
        case IntConversionBehavior::TruncateNoWrap:
        case IntConversionBehavior::ClampToUint8:
          branchTestNull(Equal, tag, &isNull);
          if (handleStrings) {
            branchTestString(Equal, tag, &isString);
          }
          branchTestUndefined(Assembler::NotEqual, tag, fail);
          break;
      }
    } else {
      // Numbers-only (or numbers/bools-only) input: any other tag fails.
      jump(fail);
    }
  }

  // The value is null or undefined in truncation contexts - just emit 0.
  if (conversion == IntConversionInputKind::Any) {
    if (isNull.used()) {
      bind(&isNull);
    }
    mov(ImmWord(0), output);
    jump(&done);
  }

  // |output| needs to be different from |stringReg| to load string indices.
  bool handleStringIndices = handleStrings && output != stringReg;

  // First try loading a string index. If that fails, try converting a string
  // into a double, then jump to the double case.
  Label handleStringIndex;
  if (handleStrings) {
    bind(&isString);
    unboxString(value, stringReg);
    if (handleStringIndices) {
      loadStringIndexValue(stringReg, output, handleStringEntry);
      jump(&handleStringIndex);
    } else {
      jump(handleStringEntry);
    }
  }

  // Try converting double into integer.
  if (isDouble.used() || handleStrings) {
    if (isDouble.used()) {
      bind(&isDouble);
      unboxDouble(value, temp);
    }

    // The out-of-line string conversion rejoins here with its double in
    // |temp|, sharing the double-to-int path below.
    if (handleStrings) {
      bind(handleStringRejoin);
    }

    convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
    jump(&done);
  }

  // Just unbox a bool, the result is 0 or 1.
  if (isBool.used()) {
    bind(&isBool);
    unboxBoolean(value, output);
    jump(&done);
  }

  // Integers can be unboxed.
  if (isInt32.used() || handleStringIndices) {
    if (isInt32.used()) {
      bind(&isInt32);
      unboxInt32(value, output);
    }

    if (handleStringIndices) {
      bind(&handleStringIndex);
    }

    if (behavior == IntConversionBehavior::ClampToUint8) {
      clampIntToUint8(output);
    }
  }

  bind(&done);
}
2591
// Finalize assembly: emit the shared failure path (if any failure jumps were
// recorded), run the platform-specific finish step, and flag OOM if the
// buffer would exceed the process-wide executable-code limit.
void MacroAssembler::finish() {
  if (failureLabel_.used()) {
    bind(&failureLabel_);
    handleFailure();
  }

  MacroAssemblerSpecific::finish();

  MOZ_RELEASE_ASSERT(
      size() <= MaxCodeBytesPerProcess,
      "AssemblerBuffer should ensure we don't exceed MaxCodeBytesPerProcess");

  if (bytesNeeded() > MaxCodeBytesPerProcess) {
    setOOM();
  }
}
2608
// Link this assembler's recorded metadata against the freshly-allocated
// |code|; currently this only patches the profiler call sites.
void MacroAssembler::link(JitCode* code) {
  MOZ_ASSERT(!oom());
  linkProfilerCallSites(code);
}
2613
// RAII helper that, when profiling instrumentation is enabled, emits code to
// record the current call site in the active JitActivation's
// lastProfilingCallSite slot. The address is emitted as a placeholder (-1)
// via movWithPatch and later patched to its real value by
// linkProfilerCallSites().
MacroAssembler::AutoProfilerCallInstrumentation::
    AutoProfilerCallInstrumentation(MacroAssembler& masm) {
  if (!masm.emitProfilingInstrumentation_) {
    return;
  }

  // Preserve the two call-temp registers we clobber below.
  Register reg = CallTempReg0;
  Register reg2 = CallTempReg1;
  masm.push(reg);
  masm.push(reg2);

  // Placeholder for this call site's address; patched at link time.
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
  masm.loadJSContext(reg2);
  masm.loadPtr(Address(reg2, offsetof(JSContext, profilingActivation_)), reg2);
  masm.storePtr(reg,
                Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));

  // Remember the patch location so linkProfilerCallSites() can find it.
  masm.appendProfilerCallSite(label);

  masm.pop(reg2);
  masm.pop(reg);
}
2636
linkProfilerCallSites(JitCode * code)2637 void MacroAssembler::linkProfilerCallSites(JitCode* code) {
2638 for (size_t i = 0; i < profilerCallSites_.length(); i++) {
2639 CodeOffset offset = profilerCallSites_[i];
2640 CodeLocationLabel location(code, offset);
2641 PatchDataWithValueCheck(location, ImmPtr(location.raw()),
2642 ImmPtr((void*)-1));
2643 }
2644 }
2645
// Align the stack for a JIT call whose argument count is only known at
// runtime (in |nargs|). When JitStackValueAlignment == 2 this ensures the
// eventual return address will be 16-byte aligned by conditionally aligning
// or offsetting the stack pointer based on the parity of the argument count.
void MacroAssembler::alignJitStackBasedOnNArgs(Register nargs,
                                               bool countIncludesThis) {
  // The stack should already be aligned to the size of a value.
  assertStackAlignment(sizeof(Value), 0);

  static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
                "JitStackValueAlignment is either 1 or 2.");
  if (JitStackValueAlignment == 1) {
    return;
  }
  // A jit frame is composed of the following:
  //
  // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
  //                                    \________JitFrameLayout_________/
  // (The stack grows this way --->)
  //
  // We want to ensure that |raddr|, the return address, is 16-byte aligned.
  // (Note: if 8-byte alignment was sufficient, we would have already
  // returned above.)

  // JitFrameLayout does not affect the alignment, so we can ignore it.
  static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
                "JitFrameLayout doesn't affect stack alignment");

  // Therefore, we need to ensure that |this| is aligned.
  // This implies that |argN| must be aligned if N is even,
  // and offset by |sizeof(Value)| if N is odd.

  // Depending on the context of the caller, it may be easier to pass in a
  // register that has already been modified to include |this|. If that is the
  // case, we want to flip the direction of the test.
  Assembler::Condition condition =
      countIncludesThis ? Assembler::NonZero : Assembler::Zero;

  Label alignmentIsOffset, end;
  branchTestPtr(condition, nargs, Imm32(1), &alignmentIsOffset);

  // |argN| should be aligned to 16 bytes.
  andToStackPtr(Imm32(~(JitStackAlignment - 1)));
  jump(&end);

  // |argN| should be offset by 8 bytes from 16-byte alignment.
  // We already know that it is 8-byte aligned, so the only possibilities are:
  // a) It is 16-byte aligned, and we must offset it by 8 bytes.
  // b) It is not 16-byte aligned, and therefore already has the right offset.
  // Therefore, we test to see if it is 16-byte aligned, and adjust it if it is.
  bind(&alignmentIsOffset);
  branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
  subFromStackPtr(Imm32(sizeof(Value)));

  bind(&end);
}
2698
// Compile-time-count variant of the above: with |argc| known statically we
// can decide which alignment case applies without emitting a runtime test on
// the argument count.
void MacroAssembler::alignJitStackBasedOnNArgs(uint32_t argc) {
  // The stack should already be aligned to the size of a value.
  assertStackAlignment(sizeof(Value), 0);

  static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
                "JitStackValueAlignment is either 1 or 2.");
  if (JitStackValueAlignment == 1) {
    return;
  }

  // See above for full explanation.
  uint32_t nArgs = argc + 1;  // include |this|
  if (nArgs % 2 == 0) {
    // |argN| should be 16-byte aligned.
    andToStackPtr(Imm32(~(JitStackAlignment - 1)));
  } else {
    // |argN| should be offset by 8 bytes from 16-byte alignment. It is
    // already 8-byte aligned, so subtract 8 only if the stack pointer is
    // currently 16-byte aligned.
    Label end;
    branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
    subFromStackPtr(Imm32(sizeof(Value)));
    bind(&end);
    assertStackAlignment(JitStackAlignment, sizeof(Value));
  }
}
2724
2725 // ===============================================================
2726
// Construct a MacroAssembler that creates its own JitContext and
// TempAllocator, for callers that do not already have an active JitContext.
MacroAssembler::MacroAssembler(JSContext* cx)
    : wasmMaxOffsetGuardLimit_(0),
      framePushed_(0),
#ifdef DEBUG
      inCall_(false),
#endif
      dynamicAlignment_(false),
      emitProfilingInstrumentation_(false) {
  // Install a fresh JitContext (no TempAllocator yet) and back the allocator
  // with the JSContext's LifoAlloc.
  jitContext_.emplace(cx, (js::jit::TempAllocator*)nullptr);
  alloc_.emplace(&cx->tempLifoAlloc());
  moveResolver_.setAllocator(*jitContext_->temp);
#if defined(JS_CODEGEN_ARM)
  // ARM buffers carry a per-assembler id.
  initWithAllocator();
  m_buffer.id = GetJitContext()->getNextAssemblerId();
#elif defined(JS_CODEGEN_ARM64)
  initWithAllocator();
  armbuffer_.id = GetJitContext()->getNextAssemblerId();
#endif
}
2746
// Construct a MacroAssembler using the already-active JitContext obtained
// from GetJitContext().
MacroAssembler::MacroAssembler()
    : wasmMaxOffsetGuardLimit_(0),
      framePushed_(0),
#ifdef DEBUG
      inCall_(false),
#endif
      dynamicAlignment_(false),
      emitProfilingInstrumentation_(false) {
  JitContext* jcx = GetJitContext();

  if (!jcx->temp) {
    // No TempAllocator installed yet: create one backed by the JSContext's
    // LifoAlloc. NOTE(review): this presumably installs itself as jcx->temp
    // as a side effect, since *jcx->temp is dereferenced below -- verify
    // against the alloc_ member's constructor.
    JSContext* cx = jcx->cx;
    MOZ_ASSERT(cx);
    alloc_.emplace(&cx->tempLifoAlloc());
  }

  moveResolver_.setAllocator(*jcx->temp);

#if defined(JS_CODEGEN_ARM)
  // ARM buffers carry a per-assembler id.
  initWithAllocator();
  m_buffer.id = jcx->getNextAssemblerId();
#elif defined(JS_CODEGEN_ARM64)
  initWithAllocator();
  armbuffer_.id = jcx->getNextAssemblerId();
#endif
}
2773
// Construct a MacroAssembler for wasm compilation: no JitContext is used,
// only the supplied TempAllocator.
MacroAssembler::MacroAssembler(WasmToken, TempAllocator& alloc)
    : wasmMaxOffsetGuardLimit_(0),
      framePushed_(0),
#ifdef DEBUG
      inCall_(false),
#endif
      dynamicAlignment_(false),
      emitProfilingInstrumentation_(false) {
  moveResolver_.setAllocator(alloc);

#if defined(JS_CODEGEN_ARM)
  initWithAllocator();
  m_buffer.id = 0;
#elif defined(JS_CODEGEN_ARM64)
  initWithAllocator();
  // Stubs + builtins + the baseline compiler all require the native SP,
  // not the PSP.
  SetStackPointer64(sp);
  armbuffer_.id = 0;
#endif
}
2795
// Wasm-specific MacroAssembler; |limitedSize| == false lifts the assembler
// buffer size limit (used for potentially very large modules).
WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc, bool limitedSize)
    : MacroAssembler(WasmToken(), alloc) {
  if (!limitedSize) {
    setUnlimitedBuffer();
  }
}
2802
// As above, but additionally configures the maximum offset-guard limit from
// the module environment (larger when huge memory is enabled).
WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc,
                                       const wasm::ModuleEnvironment& env,
                                       bool limitedSize)
    : MacroAssembler(WasmToken(), alloc) {
  setWasmMaxOffsetGuardLimit(
      wasm::GetMaxOffsetGuardLimit(env.hugeMemoryEnabled()));
  if (!limitedSize) {
    setUnlimitedBuffer();
  }
}
2813
// IC variant of buildOOLFakeExitFrame. |save| is unused here; presumably it
// is part of the signature expected by IC callers -- confirm at call sites.
bool MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr,
                                             AutoSaveLiveRegisters& save) {
  return buildOOLFakeExitFrame(fakeReturnAddr);
}
2818
#ifndef JS_CODEGEN_ARM64
// Subtract |reg| from the stack pointer. ARM64 supplies its own definition
// (hence the guard), since its stack pointer handling differs.
void MacroAssembler::subFromStackPtr(Register reg) {
  subPtr(reg, getStackPointer());
}
#endif  // JS_CODEGEN_ARM64
2824
2825 //{{{ check_macroassembler_style
2826 // ===============================================================
2827 // Stack manipulation functions.
2828
// GPR-only convenience overload: delegate with an empty float-register set.
void MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set) {
  PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}
2832
// Pop all registers in |set|; delegates to the Ignore variant with nothing
// ignored.
void MacroAssembler::PopRegsInMask(LiveRegisterSet set) {
  PopRegsInMaskIgnore(set, LiveRegisterSet());
}
2836
// GPR-only convenience overload: delegate with an empty float-register set.
void MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set) {
  PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}
2840
Push(PropertyKey key,Register scratchReg)2841 void MacroAssembler::Push(PropertyKey key, Register scratchReg) {
2842 if (key.isGCThing()) {
2843 // If we're pushing a gcthing, then we can't just push the tagged key
2844 // value since the GC won't have any idea that the push instruction
2845 // carries a reference to a gcthing. Need to unpack the pointer,
2846 // push it using ImmGCPtr, and then rematerialize the PropertyKey at
2847 // runtime.
2848
2849 if (key.isString()) {
2850 JSString* str = key.toString();
2851 MOZ_ASSERT((uintptr_t(str) & JSID_TYPE_MASK) == 0);
2852 static_assert(JSID_TYPE_STRING == 0,
2853 "need to orPtr JSID_TYPE_STRING tag if it's not 0");
2854 Push(ImmGCPtr(str));
2855 } else {
2856 MOZ_ASSERT(key.isSymbol());
2857 movePropertyKey(key, scratchReg);
2858 Push(scratchReg);
2859 }
2860 } else {
2861 MOZ_ASSERT(key.isInt());
2862 Push(ImmWord(key.asBits));
2863 }
2864 }
2865
movePropertyKey(PropertyKey key,Register dest)2866 void MacroAssembler::movePropertyKey(PropertyKey key, Register dest) {
2867 if (key.isGCThing()) {
2868 // See comment in |Push(PropertyKey, ...)| above for an explanation.
2869 if (key.isString()) {
2870 JSString* str = key.toString();
2871 MOZ_ASSERT((uintptr_t(str) & JSID_TYPE_MASK) == 0);
2872 static_assert(JSID_TYPE_STRING == 0,
2873 "need to orPtr JSID_TYPE_STRING tag if it's not 0");
2874 movePtr(ImmGCPtr(str), dest);
2875 } else {
2876 MOZ_ASSERT(key.isSymbol());
2877 JS::Symbol* sym = key.toSymbol();
2878 movePtr(ImmGCPtr(sym), dest);
2879 orPtr(Imm32(JSID_TYPE_SYMBOL), dest);
2880 }
2881 } else {
2882 MOZ_ASSERT(key.isInt());
2883 movePtr(ImmWord(key.asBits), dest);
2884 }
2885 }
2886
Push(TypedOrValueRegister v)2887 void MacroAssembler::Push(TypedOrValueRegister v) {
2888 if (v.hasValue()) {
2889 Push(v.valueReg());
2890 } else if (IsFloatingPointType(v.type())) {
2891 FloatRegister reg = v.typedReg().fpu();
2892 if (v.type() == MIRType::Float32) {
2893 ScratchDoubleScope fpscratch(*this);
2894 convertFloat32ToDouble(reg, fpscratch);
2895 PushBoxed(fpscratch);
2896 } else {
2897 PushBoxed(reg);
2898 }
2899 } else {
2900 Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
2901 }
2902 }
2903
Push(const ConstantOrRegister & v)2904 void MacroAssembler::Push(const ConstantOrRegister& v) {
2905 if (v.constant()) {
2906 Push(v.value());
2907 } else {
2908 Push(v.reg());
2909 }
2910 }
2911
// Push the word stored at |addr|, tracking the frame depth.
void MacroAssembler::Push(const Address& addr) {
  push(addr);
  framePushed_ += sizeof(uintptr_t);
}
2916
// Push a boxed Value held in registers, tracking the frame depth.
void MacroAssembler::Push(const ValueOperand& val) {
  pushValue(val);
  framePushed_ += sizeof(Value);
}
2921
// Push a constant Value, tracking the frame depth.
void MacroAssembler::Push(const Value& val) {
  pushValue(val);
  framePushed_ += sizeof(Value);
}
2926
// Push a payload register boxed with the given JSValueType tag, tracking the
// frame depth.
void MacroAssembler::Push(JSValueType type, Register reg) {
  pushValue(type, reg);
  framePushed_ += sizeof(Value);
}
2931
// Push a 64-bit value: one push on 64-bit targets, high-then-low word pushes
// on 32-bit little-endian targets.
void MacroAssembler::Push(const Register64 reg) {
#if JS_BITS_PER_WORD == 64
  Push(reg.reg);
#else
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Big-endian not supported.");
  Push(reg.high);
  Push(reg.low);
#endif
}
2941
PushEmptyRooted(VMFunctionData::RootType rootType)2942 void MacroAssembler::PushEmptyRooted(VMFunctionData::RootType rootType) {
2943 switch (rootType) {
2944 case VMFunctionData::RootNone:
2945 MOZ_CRASH("Handle must have root type");
2946 case VMFunctionData::RootObject:
2947 case VMFunctionData::RootString:
2948 case VMFunctionData::RootFunction:
2949 case VMFunctionData::RootCell:
2950 case VMFunctionData::RootBigInt:
2951 Push(ImmPtr(nullptr));
2952 break;
2953 case VMFunctionData::RootValue:
2954 Push(UndefinedValue());
2955 break;
2956 case VMFunctionData::RootId:
2957 Push(ImmWord(JSID_BITS(JSID_VOID)));
2958 break;
2959 }
2960 }
2961
popRooted(VMFunctionData::RootType rootType,Register cellReg,const ValueOperand & valueReg)2962 void MacroAssembler::popRooted(VMFunctionData::RootType rootType,
2963 Register cellReg, const ValueOperand& valueReg) {
2964 switch (rootType) {
2965 case VMFunctionData::RootNone:
2966 MOZ_CRASH("Handle must have root type");
2967 case VMFunctionData::RootObject:
2968 case VMFunctionData::RootString:
2969 case VMFunctionData::RootFunction:
2970 case VMFunctionData::RootCell:
2971 case VMFunctionData::RootId:
2972 case VMFunctionData::RootBigInt:
2973 Pop(cellReg);
2974 break;
2975 case VMFunctionData::RootValue:
2976 Pop(valueReg);
2977 break;
2978 }
2979 }
2980
adjustStack(int amount)2981 void MacroAssembler::adjustStack(int amount) {
2982 if (amount > 0) {
2983 freeStack(amount);
2984 } else if (amount < 0) {
2985 reserveStack(-amount);
2986 }
2987 }
2988
// Release |amount| bytes previously reserved on the stack and update the
// frame-depth bookkeeping. |amount| must not exceed the tracked depth.
void MacroAssembler::freeStack(uint32_t amount) {
  MOZ_ASSERT(amount <= framePushed_);
  if (amount) {
    addToStackPtr(Imm32(amount));
  }
  framePushed_ -= amount;
}
2996
// Release a dynamically-sized stack area. Note framePushed_ is not updated
// here, since the amount is only known at runtime.
void MacroAssembler::freeStack(Register amount) { addToStackPtr(amount); }
2998
2999 // ===============================================================
3000 // ABI function calls.
// Shared setup for ABI calls: resets the per-call state and constructs a
// fresh ABI-argument generator of the requested kind, applying the
// platform-specific ABI configuration.
template <class ABIArgGeneratorT>
void MacroAssembler::setupABICallHelper() {
#ifdef DEBUG
  MOZ_ASSERT(!inCall_);
  inCall_ = true;
#endif

#ifdef JS_SIMULATOR
  signature_ = 0;
#endif

  // Reinitialize the ABIArg generator.
  abiArgs_ = ABIArgGeneratorT();

#if defined(JS_CODEGEN_ARM)
  // On ARM, we need to know what ABI we are using, either in the
  // simulator, or based on the configure flags.
# if defined(JS_SIMULATOR_ARM)
  abiArgs_.setUseHardFp(UseHardFpABI());
# elif defined(JS_CODEGEN_ARM_HARDFP)
  abiArgs_.setUseHardFp(true);
# else
  abiArgs_.setUseHardFp(false);
# endif
#endif

#if defined(JS_CODEGEN_MIPS32)
  // On MIPS, the system ABI use general registers pairs to encode double
  // arguments, after one or 2 integer-like arguments. Unfortunately, the
  // Lowering phase is not capable to express it at the moment. So we enforce
  // the system ABI here.
  abiArgs_.enforceO32ABI();
#endif
}
3035
// Set up an ABI call to native (non-wasm) code.
void MacroAssembler::setupNativeABICall() {
  setupABICallHelper<ABIArgGenerator>();
}
3039
// Set up an ABI call made from wasm code; the stack is assumed to already be
// aligned (no dynamic alignment is performed).
void MacroAssembler::setupWasmABICall() {
  MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
  setupABICallHelper<WasmABIArgGenerator>();

#if defined(JS_CODEGEN_ARM)
  // The builtin thunk does the FP -> GPR moving on soft-FP, so
  // use hard fp unconditionally.
  abiArgs_.setUseHardFp(true);
#endif
  dynamicAlignment_ = false;
}
3051
// Set up a native ABI call from code whose stack is already ABI-aligned, so
// no dynamic alignment is needed. Not supported on arm64.
void MacroAssembler::setupAlignedABICall() {
  MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
  setupNativeABICall();
  dynamicAlignment_ = false;

#if defined(JS_CODEGEN_ARM64)
  MOZ_CRASH("Not supported on arm64");
#endif
}
3061
passABIArg(const MoveOperand & from,MoveOp::Type type)3062 void MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type) {
3063 MOZ_ASSERT(inCall_);
3064 appendSignatureType(type);
3065
3066 ABIArg arg;
3067 switch (type) {
3068 case MoveOp::FLOAT32:
3069 arg = abiArgs_.next(MIRType::Float32);
3070 break;
3071 case MoveOp::DOUBLE:
3072 arg = abiArgs_.next(MIRType::Double);
3073 break;
3074 case MoveOp::GENERAL:
3075 arg = abiArgs_.next(MIRType::Pointer);
3076 break;
3077 default:
3078 MOZ_CRASH("Unexpected argument type");
3079 }
3080
3081 MoveOperand to(*this, arg);
3082 if (from == to) {
3083 return;
3084 }
3085
3086 if (oom()) {
3087 return;
3088 }
3089 propagateOOM(moveResolver_.addMove(from, to, type));
3090 }
3091
// Perform an ABI call to the native function |fun| without profiler
// instrumentation. In debug builds with |check| enabled, a context flag is
// set before the call and verified cleared afterwards, to assert the callee
// used AutoUnsafeCallWithABI.
void MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result,
                                           CheckUnsafeCallWithABI check) {
  appendSignatureType(result);
#ifdef JS_SIMULATOR
  fun = Simulator::RedirectNativeFunction(fun, signature());
#endif

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);

#ifdef DEBUG
  if (check == CheckUnsafeCallWithABI::Check) {
    // Set cx->inUnsafeCallWithABI; the callee is expected to clear it.
    push(ReturnReg);
    loadJSContext(ReturnReg);
    Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
    store32(Imm32(1), flagAddr);
    pop(ReturnReg);
    // On arm64, SP may be < PSP now (that's OK).
    // eg testcase: tests/bug1375074.js
  }
#endif

  call(ImmPtr(fun));

  callWithABIPost(stackAdjust, result);

#ifdef DEBUG
  if (check == CheckUnsafeCallWithABI::Check) {
    // The flag must have been cleared by AutoUnsafeCallWithABI in the callee.
    Label ok;
    push(ReturnReg);
    loadJSContext(ReturnReg);
    Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
    branch32(Assembler::Equal, flagAddr, Imm32(0), &ok);
    assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
    bind(&ok);
    pop(ReturnReg);
    // On arm64, SP may be < PSP now (that's OK).
    // eg testcase: tests/bug1375074.js
  }
#endif
}
3133
// Perform an ABI call from wasm to a builtin that requires a thunk. The TLS
// register is reloaded from the stack at |tlsOffset| before the call.
// Returns the offset of the return address for call-site metadata.
CodeOffset MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode,
                                       wasm::SymbolicAddress imm,
                                       mozilla::Maybe<int32_t> tlsOffset,
                                       MoveOp::Type result) {
  MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm));

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust, /* callFromWasm = */ true);

  // The TLS register is used in builtin thunks and must be set.
  if (tlsOffset) {
    // |stackAdjust| accounts for the outgoing-call area pushed above.
    loadPtr(Address(getStackPointer(), *tlsOffset + stackAdjust), WasmTlsReg);
  } else {
    MOZ_CRASH("tlsOffset is Nothing only for unsupported abi calls.");
  }
  CodeOffset raOffset = call(
      wasm::CallSiteDesc(bytecode.offset(), wasm::CallSite::Symbolic), imm);

  callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);

  return raOffset;
}
3156
// Perform an ABI call to a symbolic address that does not go through a
// builtin thunk (debug-only path).
void MacroAssembler::callDebugWithABI(wasm::SymbolicAddress imm,
                                      MoveOp::Type result) {
  MOZ_ASSERT(!wasm::NeedsBuiltinThunk(imm));
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust, /* callFromWasm = */ false);
  call(imm);
  callWithABIPost(stackAdjust, result, /* callFromWasm = */ false);
}
3165
3166 // ===============================================================
3167 // Exit frame footer.
3168
// Record the current stack pointer as the activation's packed exit frame
// pointer. |cxreg| must hold the JSContext; |scratch| is clobbered.
void MacroAssembler::linkExitFrame(Register cxreg, Register scratch) {
  loadPtr(Address(cxreg, JSContext::offsetOfActivation()), scratch);
  storeStackPtr(Address(scratch, JitActivation::offsetOfPackedExitFP()));
}
3173
3174 // ===============================================================
3175 // Simple value-shuffling helpers, to hide MoveResolver verbosity
3176 // in common cases.
3177
// Move (src0, src1) into (dst0, dst1) as a parallel move, using the
// MoveResolver so overlapping/cyclic assignments are handled correctly.
// No code is emitted if resolution hits OOM.
void MacroAssembler::moveRegPair(Register src0, Register src1, Register dst0,
                                 Register dst1, MoveOp::Type type) {
  MoveResolver& moves = moveResolver();
  if (src0 != dst0) {
    propagateOOM(moves.addMove(MoveOperand(src0), MoveOperand(dst0), type));
  }
  if (src1 != dst1) {
    propagateOOM(moves.addMove(MoveOperand(src1), MoveOperand(dst1), type));
  }
  propagateOOM(moves.resolve());
  if (oom()) {
    return;
  }

  MoveEmitter emitter(*this);
  emitter.emit(moves);
  emitter.finish();
}
3196
3197 // ===============================================================
3198 // Arithmetic functions
3199
// Compute |base| ** |power| into |dest| by binary (square-and-multiply)
// exponentiation, jumping to |onOver| on int32 overflow or when |power| is
// negative. Clobbers |temp1| and |temp2|.
void MacroAssembler::pow32(Register base, Register power, Register dest,
                           Register temp1, Register temp2, Label* onOver) {
  // Inline int32-specialized implementation of js::powi with overflow
  // detection.

  move32(Imm32(1), dest);  // p = 1

  // x^y where x == 1 returns 1 for any y.
  Label done;
  branch32(Assembler::Equal, base, Imm32(1), &done);

  move32(base, temp1);   // m = x
  move32(power, temp2);  // n = y

  // x^y where y < 0 returns a non-int32 value for any x != 1. Except when y is
  // large enough so that the result is no longer representable as a double with
  // fractional parts. We can't easily determine when y is too large, so we bail
  // here.
  // Note: it's important for this condition to match the code in CacheIR.cpp
  // (CanAttachInt32Pow) to prevent failure loops.
  Label start;
  branchTest32(Assembler::NotSigned, power, power, &start);
  jump(onOver);

  Label loop;
  bind(&loop);

  // m *= m
  branchMul32(Assembler::Overflow, temp1, temp1, onOver);

  bind(&start);

  // if ((n & 1) != 0) p *= m
  Label even;
  branchTest32(Assembler::Zero, temp2, Imm32(1), &even);
  branchMul32(Assembler::Overflow, temp1, dest, onOver);
  bind(&even);

  // n >>= 1
  // if (n == 0) return p
  branchRshift32(Assembler::NonZero, Imm32(1), temp2, &loop);

  bind(&done);
}
3244
// Math.sign for int32: |output| = -1, 0, or 1 for negative, zero, or
// positive |input| respectively.
void MacroAssembler::signInt32(Register input, Register output) {
  MOZ_ASSERT(input != output);

  Label done;
  // Arithmetic shift by 31 yields -1 for negatives, 0 otherwise.
  move32(input, output);
  rshift32Arithmetic(Imm32(31), output);
  branch32(Assembler::LessThanOrEqual, input, Imm32(0), &done);
  // Positive inputs produce 1.
  move32(Imm32(1), output);
  bind(&done);
}
3255
// Math.sign for doubles: |output| = -1.0, +1.0, or, for ±0.0 and NaN, the
// input itself (preserving the zero's sign / the NaN).
void MacroAssembler::signDouble(FloatRegister input, FloatRegister output) {
  MOZ_ASSERT(input != output);

  Label done, zeroOrNaN, negative;
  loadConstantDouble(0.0, output);
  branchDouble(Assembler::DoubleEqualOrUnordered, input, output, &zeroOrNaN);
  branchDouble(Assembler::DoubleLessThan, input, output, &negative);

  loadConstantDouble(1.0, output);
  jump(&done);

  bind(&negative);
  loadConstantDouble(-1.0, output);
  jump(&done);

  // ±0.0 and NaN pass through unchanged.
  bind(&zeroOrNaN);
  moveDouble(input, output);

  bind(&done);
}
3276
// Math.sign for a double input with an int32 result: |output| = -1, 0, or 1.
// Jumps to |fail| for NaN and for -0.0, which have no faithful int32
// representation. Clobbers |temp|.
void MacroAssembler::signDoubleToInt32(FloatRegister input, Register output,
                                       FloatRegister temp, Label* fail) {
  MOZ_ASSERT(input != temp);

  Label done, zeroOrNaN, negative;
  loadConstantDouble(0.0, temp);
  branchDouble(Assembler::DoubleEqualOrUnordered, input, temp, &zeroOrNaN);
  branchDouble(Assembler::DoubleLessThan, input, temp, &negative);

  move32(Imm32(1), output);
  jump(&done);

  bind(&negative);
  move32(Imm32(-1), output);
  jump(&done);

  // Fail for NaN and negative zero.
  bind(&zeroOrNaN);
  branchDouble(Assembler::DoubleUnordered, input, input, fail);

  // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
  // is -Infinity instead of Infinity.
  loadConstantDouble(1.0, temp);
  divDouble(input, temp);
  branchDouble(Assembler::DoubleLessThan, temp, input, fail);
  move32(Imm32(0), output);

  bind(&done);
}
3306
// Generate the next double in [0, 1) from the xorshift128+ state pointed to
// by |rng|, advancing the stored state. Mirrors
// mozilla::non_crypto::XorShift128PlusRNG::nextDouble(). Clobbers |temp0|
// and |temp1|.
void MacroAssembler::randomDouble(Register rng, FloatRegister dest,
                                  Register64 temp0, Register64 temp1) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  static_assert(
      sizeof(XorShift128PlusRNG) == 2 * sizeof(uint64_t),
      "Code below assumes XorShift128PlusRNG contains two uint64_t values");

  Address state0Addr(rng, XorShift128PlusRNG::offsetOfState0());
  Address state1Addr(rng, XorShift128PlusRNG::offsetOfState1());

  Register64 s0Reg = temp0;
  Register64 s1Reg = temp1;

  // uint64_t s1 = mState[0];
  load64(state0Addr, s1Reg);

  // s1 ^= s1 << 23;
  move64(s1Reg, s0Reg);
  lshift64(Imm32(23), s1Reg);
  xor64(s0Reg, s1Reg);

  // s1 ^= s1 >> 17
  move64(s1Reg, s0Reg);
  rshift64(Imm32(17), s1Reg);
  xor64(s0Reg, s1Reg);

  // const uint64_t s0 = mState[1];
  load64(state1Addr, s0Reg);

  // mState[0] = s0;
  store64(s0Reg, state0Addr);

  // s1 ^= s0
  xor64(s0Reg, s1Reg);

  // s1 ^= s0 >> 26
  rshift64(Imm32(26), s0Reg);
  xor64(s0Reg, s1Reg);

  // mState[1] = s1
  store64(s1Reg, state1Addr);

  // s1 += mState[0]
  load64(state0Addr, s0Reg);
  add64(s0Reg, s1Reg);

  // See comment in XorShift128PlusRNG::nextDouble().
  static constexpr int MantissaBits =
      mozilla::FloatingPoint<double>::kExponentShift + 1;
  static constexpr double ScaleInv = double(1) / (1ULL << MantissaBits);

  // Keep only the mantissa-width low bits so the scaled result is in [0, 1).
  and64(Imm64((1ULL << MantissaBits) - 1), s1Reg);

  // Note: we know s1Reg isn't signed after the and64 so we can use the faster
  // convertInt64ToDouble instead of convertUInt64ToDouble.
  convertInt64ToDouble(s1Reg, dest);

  // dest *= ScaleInv
  mulDoublePtr(ImmPtr(&ScaleInv), s0Reg.scratchReg(), dest);
}
3368
// SameValue comparison for doubles: sets |dest| to 1 when |left| and |right|
// are the same value (equal and with matching zero signs, or both NaN),
// otherwise 0. Clobbers |temp|.
void MacroAssembler::sameValueDouble(FloatRegister left, FloatRegister right,
                                     FloatRegister temp, Register dest) {
  Label nonEqual, isSameValue, isNotSameValue;
  branchDouble(Assembler::DoubleNotEqualOrUnordered, left, right, &nonEqual);
  {
    // First, test for being equal to 0.0, which also includes -0.0.
    loadConstantDouble(0.0, temp);
    branchDouble(Assembler::DoubleNotEqual, left, temp, &isSameValue);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
    // is -Infinity instead of Infinity.
    Label isNegInf;
    loadConstantDouble(1.0, temp);
    divDouble(left, temp);
    branchDouble(Assembler::DoubleLessThan, temp, left, &isNegInf);
    {
      // left is +0.0: right must also be +0.0 (1.0/right == +Infinity).
      loadConstantDouble(1.0, temp);
      divDouble(right, temp);
      branchDouble(Assembler::DoubleGreaterThan, temp, right, &isSameValue);
      jump(&isNotSameValue);
    }
    bind(&isNegInf);
    {
      // left is -0.0: right must also be -0.0 (1.0/right == -Infinity).
      loadConstantDouble(1.0, temp);
      divDouble(right, temp);
      branchDouble(Assembler::DoubleLessThan, temp, right, &isSameValue);
      jump(&isNotSameValue);
    }
  }
  bind(&nonEqual);
  {
    // Test if both values are NaN.
    branchDouble(Assembler::DoubleOrdered, left, left, &isNotSameValue);
    branchDouble(Assembler::DoubleOrdered, right, right, &isNotSameValue);
  }

  Label done;
  bind(&isSameValue);
  move32(Imm32(1), dest);
  jump(&done);

  bind(&isNotSameValue);
  move32(Imm32(0), dest);

  bind(&done);
}
3415
// Compute the min (or max, when |isMax|) of a packed array of int32 values
// into |result|. Jumps to |fail| when the array is empty or when any element
// is not an int32 (via fallibleUnboxInt32). Clobbers |temp1|-|temp3|.
void MacroAssembler::minMaxArrayInt32(Register array, Register result,
                                      Register temp1, Register temp2,
                                      Register temp3, bool isMax, Label* fail) {
  // array must be a packed array. Load its elements.
  Register elements = temp1;
  loadPtr(Address(array, NativeObject::offsetOfElements()), elements);

  // Load the length and guard that it is non-zero.
  Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp3);
  branchTest32(Assembler::Zero, temp3, temp3, fail);

  // Compute the address of the last element.
  Register elementsEnd = temp2;
  BaseObjectElementIndex elementsEndAddr(elements, temp3,
                                         -int32_t(sizeof(Value)));
  computeEffectiveAddress(elementsEndAddr, elementsEnd);

  // Load the first element into result.
  fallibleUnboxInt32(Address(elements, 0), result, fail);

  Label loop, done;
  bind(&loop);

  // Check whether we're done.
  branchPtr(Assembler::Equal, elements, elementsEnd, &done);

  // If not, advance to the next element and load it.
  addPtr(Imm32(sizeof(Value)), elements);
  fallibleUnboxInt32(Address(elements, 0), temp3, fail);

  // Update result if necessary.
  Assembler::Condition cond =
      isMax ? Assembler::GreaterThan : Assembler::LessThan;
  cmp32Move32(cond, temp3, result, temp3, result);

  jump(&loop);
  bind(&done);
}
3455
// Compute the minimum (|isMax| = false) or maximum (|isMax| = true) of the
// numeric elements of a packed array |array| as a double, storing it in
// |result|.
//
// Jumps to |fail| if any element cannot be converted to a double inline (see
// ensureDouble). An empty array yields -Infinity for max and +Infinity for
// min, matching Math.max()/Math.min() with no arguments. NaN elements are
// handled by min/maxDouble with handleNaN = true. Clobbers |temp1| (used as
// a cursor over the elements), |temp2| and |floatTemp|.
void MacroAssembler::minMaxArrayNumber(Register array, FloatRegister result,
                                       FloatRegister floatTemp, Register temp1,
                                       Register temp2, bool isMax,
                                       Label* fail) {
  // array must be a packed array. Load its elements.
  Register elements = temp1;
  loadPtr(Address(array, NativeObject::offsetOfElements()), elements);

  // Load the length and check if the array is empty.
  Label isEmpty;
  Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branchTest32(Assembler::Zero, temp2, temp2, &isEmpty);

  // Compute the address of the last element. |temp2| holds the length and is
  // reused in-place as the end pointer.
  Register elementsEnd = temp2;
  BaseObjectElementIndex elementsEndAddr(elements, temp2,
                                         -int32_t(sizeof(Value)));
  computeEffectiveAddress(elementsEndAddr, elementsEnd);

  // Load the first element into result.
  ensureDouble(Address(elements, 0), result, fail);

  Label loop, done;
  bind(&loop);

  // Check whether we're done.
  branchPtr(Assembler::Equal, elements, elementsEnd, &done);

  // If not, advance to the next element and load it into floatTemp.
  addPtr(Imm32(sizeof(Value)), elements);
  ensureDouble(Address(elements, 0), floatTemp, fail);

  // Update result if necessary.
  if (isMax) {
    maxDouble(floatTemp, result, /* handleNaN = */ true);
  } else {
    minDouble(floatTemp, result, /* handleNaN = */ true);
  }
  jump(&loop);

  // With no arguments, min/max return +Infinity/-Infinity respectively.
  bind(&isEmpty);
  if (isMax) {
    loadConstantDouble(mozilla::NegativeInfinity<double>(), result);
  } else {
    loadConstantDouble(mozilla::PositiveInfinity<double>(), result);
  }

  bind(&done);
}
3507
// Branch to |fail| unless |proto|'s shape matches the optimizable
// RegExp.prototype shape cached in the current realm's RegExpRealm.
// Clobbers |temp|.
void MacroAssembler::branchIfNotRegExpPrototypeOptimizable(Register proto,
                                                           Register temp,
                                                           Label* fail) {
  // temp = cx->realm()->regExps.optimizableRegExpPrototypeShape
  loadJSContext(temp);
  loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
  size_t offset = Realm::offsetOfRegExps() +
                  RegExpRealm::offsetOfOptimizableRegExpPrototypeShape();
  loadPtr(Address(temp, offset), temp);
  branchTestObjShapeUnsafe(Assembler::NotEqual, proto, temp, fail);
}
3518
// Branch to |label| unless |regexp|'s shape matches the optimizable RegExp
// instance shape cached in the current realm's RegExpRealm. Clobbers |temp|.
void MacroAssembler::branchIfNotRegExpInstanceOptimizable(Register regexp,
                                                          Register temp,
                                                          Label* label) {
  // temp = cx->realm()->regExps.optimizableRegExpInstanceShape
  loadJSContext(temp);
  loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
  size_t offset = Realm::offsetOfRegExps() +
                  RegExpRealm::offsetOfOptimizableRegExpInstanceShape();
  loadPtr(Address(temp, offset), temp);
  branchTestObjShapeUnsafe(Assembler::NotEqual, regexp, temp, label);
}
3529
3530 // ===============================================================
3531 // Branch functions
3532
// Load the value of |func|'s "length" property into |output|.
//
// |funFlags| must hold the function's flags; the caller must already have
// ruled out SELFHOSTLAZY and RESOLVED_LENGTH (asserted in DEBUG builds).
// Jumps to |slowPath| when the length can't be computed inline: a bound
// function whose length slot isn't an Int32, or an interpreted function with
// no shared script data. NOTE: |funFlags| and |output| may alias.
void MacroAssembler::loadFunctionLength(Register func, Register funFlags,
                                        Register output, Label* slowPath) {
#ifdef DEBUG
  {
    // These flags should already have been checked by caller.
    Label ok;
    uint32_t FlagsToCheck =
        FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH;
    branchTest32(Assembler::Zero, funFlags, Imm32(FlagsToCheck), &ok);
    assumeUnreachable("The function flags should already have been checked.");
    bind(&ok);
  }
#endif  // DEBUG

  // NOTE: `funFlags` and `output` must be allowed to alias.

  // Load the target function's length.
  Label isInterpreted, isBound, lengthLoaded;
  branchTest32(Assembler::NonZero, funFlags, Imm32(FunctionFlags::BOUND_FUN),
               &isBound);
  branchTest32(Assembler::NonZero, funFlags, Imm32(FunctionFlags::BASESCRIPT),
               &isInterpreted);
  {
    // Load the length property of a native function: its argument count.
    load16ZeroExtend(Address(func, JSFunction::offsetOfNargs()), output);
    jump(&lengthLoaded);
  }
  bind(&isBound);
  {
    // Load the length property of a bound function from its extended slot;
    // fall back to the VM if the slot doesn't hold an Int32.
    Address boundLength(func,
                        FunctionExtended::offsetOfBoundFunctionLengthSlot());
    fallibleUnboxInt32(boundLength, output, slowPath);
    jump(&lengthLoaded);
  }
  bind(&isInterpreted);
  {
    // Load the length property of an interpreted function from its script's
    // immutable script data; a null sharedData means we must take the slow
    // path.
    loadPtr(Address(func, JSFunction::offsetOfScript()), output);
    loadPtr(Address(output, JSScript::offsetOfSharedData()), output);
    branchTestPtr(Assembler::Zero, output, output, slowPath);
    loadPtr(Address(output, SharedImmutableScriptData::offsetOfISD()), output);
    load16ZeroExtend(Address(output, ImmutableScriptData::offsetOfFunLength()),
                     output);
  }
  bind(&lengthLoaded);
}
3580
// Load |func|'s name atom into |output|, or |emptyString| if the function
// has no name atom.
//
// Jumps to |slowPath| when the name can't be loaded inline: the name
// property may have been redefined (RESOLVED_NAME), or a bound function's
// stored atom lacks the "bound " prefix and the full name must be built in
// the VM. |func| and |output| must be distinct.
void MacroAssembler::loadFunctionName(Register func, Register output,
                                      ImmGCPtr emptyString, Label* slowPath) {
  MOZ_ASSERT(func != output);

  // Get the JSFunction flags.
  load16ZeroExtend(Address(func, JSFunction::offsetOfFlags()), output);

  // If the name was previously resolved, the name property may be shadowed.
  branchTest32(Assembler::NonZero, output, Imm32(FunctionFlags::RESOLVED_NAME),
               slowPath);

  Label notBoundTarget, loadName;
  branchTest32(Assembler::Zero, output, Imm32(FunctionFlags::BOUND_FUN),
               &notBoundTarget);
  {
    // Call into the VM if the target's name atom doesn't contain the bound
    // function prefix.
    branchTest32(Assembler::Zero, output,
                 Imm32(FunctionFlags::HAS_BOUND_FUNCTION_NAME_PREFIX),
                 slowPath);

    // Bound functions reuse HAS_GUESSED_ATOM for
    // HAS_BOUND_FUNCTION_NAME_PREFIX, so skip the guessed atom check below.
    static_assert(
        FunctionFlags::HAS_BOUND_FUNCTION_NAME_PREFIX ==
            FunctionFlags::HAS_GUESSED_ATOM,
        "HAS_BOUND_FUNCTION_NAME_PREFIX is shared with HAS_GUESSED_ATOM");
    jump(&loadName);
  }
  bind(&notBoundTarget);

  Label guessed, hasName;
  // A guessed (inferred) atom is not the function's own name.
  branchTest32(Assembler::NonZero, output,
               Imm32(FunctionFlags::HAS_GUESSED_ATOM), &guessed);
  bind(&loadName);
  loadPtr(Address(func, JSFunction::offsetOfAtom()), output);
  branchTestPtr(Assembler::NonZero, output, output, &hasName);
  {
    bind(&guessed);

    // An absent name property defaults to the empty string.
    movePtr(emptyString, output);
  }
  bind(&hasName);
}
3626
branchTestType(Condition cond,Register tag,JSValueType type,Label * label)3627 void MacroAssembler::branchTestType(Condition cond, Register tag,
3628 JSValueType type, Label* label) {
3629 switch (type) {
3630 case JSVAL_TYPE_DOUBLE:
3631 branchTestDouble(cond, tag, label);
3632 break;
3633 case JSVAL_TYPE_INT32:
3634 branchTestInt32(cond, tag, label);
3635 break;
3636 case JSVAL_TYPE_BOOLEAN:
3637 branchTestBoolean(cond, tag, label);
3638 break;
3639 case JSVAL_TYPE_UNDEFINED:
3640 branchTestUndefined(cond, tag, label);
3641 break;
3642 case JSVAL_TYPE_NULL:
3643 branchTestNull(cond, tag, label);
3644 break;
3645 case JSVAL_TYPE_MAGIC:
3646 branchTestMagic(cond, tag, label);
3647 break;
3648 case JSVAL_TYPE_STRING:
3649 branchTestString(cond, tag, label);
3650 break;
3651 case JSVAL_TYPE_SYMBOL:
3652 branchTestSymbol(cond, tag, label);
3653 break;
3654 case JSVAL_TYPE_BIGINT:
3655 branchTestBigInt(cond, tag, label);
3656 break;
3657 case JSVAL_TYPE_OBJECT:
3658 branchTestObject(cond, tag, label);
3659 break;
3660 default:
3661 MOZ_CRASH("Unexpected value type");
3662 }
3663 }
3664
// Branch to |label| if |obj|'s compartment compares |cond| against the
// compartment pointer stored at |compartment|. The object's compartment is
// reached via obj->shape->baseShape->realm->compartment. Clobbers |scratch|.
void MacroAssembler::branchTestObjCompartment(Condition cond, Register obj,
                                              const Address& compartment,
                                              Register scratch, Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
  branchPtr(cond, compartment, scratch, label);
}
3675
// Branch to |label| if |obj|'s compartment compares |cond| against the
// immediate |compartment| pointer. The object's compartment is reached via
// obj->shape->baseShape->realm->compartment. Clobbers |scratch|.
void MacroAssembler::branchTestObjCompartment(
    Condition cond, Register obj, const JS::Compartment* compartment,
    Register scratch, Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
  loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
  loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
  branchPtr(cond, scratch, ImmPtr(compartment), label);
}
3686
// Branch to |label| if |obj|'s class has the NON_NATIVE flag set, i.e. the
// object is not a NativeObject. Clobbers |scratch|.
void MacroAssembler::branchIfNonNativeObj(Register obj, Register scratch,
                                          Label* label) {
  loadObjClassUnsafe(obj, scratch);
  branchTest32(Assembler::NonZero, Address(scratch, JSClass::offsetOfFlags()),
               Imm32(JSClass::NON_NATIVE), label);
}
3693
// Branch to |label| if |obj| is not extensible, i.e. its shape's object
// flags have NotExtensible set. Clobbers |scratch|.
void MacroAssembler::branchIfObjectNotExtensible(Register obj, Register scratch,
                                                 Label* label) {
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);

  // Spectre-style checks are not needed here because we do not interpret data
  // based on this check.
  static_assert(sizeof(ObjectFlags) == sizeof(uint16_t));
  load16ZeroExtend(Address(scratch, Shape::offsetOfObjectFlags()), scratch);
  branchTest32(Assembler::NonZero, scratch,
               Imm32(uint32_t(ObjectFlag::NotExtensible)), label);
}
3705
// Emit a wasm trap instruction and record a TrapSite associating its offset
// with |trap| and |bytecodeOffset|, so the trap can be attributed back to
// the wasm bytecode that caused it.
void MacroAssembler::wasmTrap(wasm::Trap trap,
                              wasm::BytecodeOffset bytecodeOffset) {
  uint32_t trapOffset = wasmTrapInstruction().offset();
  // The emitted instruction must have the canonical trap-instruction length
  // (skip the check after OOM, when offsets are meaningless).
  MOZ_ASSERT_IF(!oom(),
                currentOffset() - trapOffset == WasmTrapInstructionLength);

  append(trap, wasm::TrapSite(trapOffset, bytecodeOffset));
}
3714
// Check the TlsData::interrupt flag for the instance in |tls| and emit a
// CheckInterrupt trap when it is non-zero; falls through otherwise.
void MacroAssembler::wasmInterruptCheck(Register tls,
                                        wasm::BytecodeOffset bytecodeOffset) {
  Label ok;
  branch32(Assembler::Equal, Address(tls, offsetof(wasm::TlsData, interrupt)),
           Imm32(0), &ok);
  wasmTrap(wasm::Trap::CheckInterrupt, bytecodeOffset);
  bind(&ok);
}
3723
#ifdef ENABLE_WASM_EXCEPTIONS
// Mark the start of a wasm try block: append a WasmTryNote whose begin
// offset is the current assembler offset; the remaining fields are
// zero-initialized here and presumably filled in when the try block ends —
// confirm at the call sites. Returns the value of append() for the note.
size_t MacroAssembler::wasmStartTry() {
  wasm::WasmTryNote tryNote = wasm::WasmTryNote(currentOffset(), 0, 0);
  return append(tryNote);
}
#endif
3730
// Reserve |amount| bytes of stack, checking against TlsData::stackLimit and
// emitting a StackOverflow trap on overflow. Returns the offset of the trap
// instruction and the number of bytes reserved before the check runs (0 for
// large frames, where sp is only bumped after the check; |amount| for small
// frames, where the check runs after reserving).
std::pair<CodeOffset, uint32_t> MacroAssembler::wasmReserveStackChecked(
    uint32_t amount, wasm::BytecodeOffset trapOffset) {
  if (amount > MAX_UNCHECKED_LEAF_FRAME_SIZE) {
    // The frame is large. Don't bump sp until after the stack limit check so
    // that the trap handler isn't called with a wild sp.
    Label ok;
    Register scratch = ABINonArgReg0;
    moveStackPtrTo(scratch);

    // Trap directly if sp - amount would wrap below zero.
    Label trap;
    branchPtr(Assembler::Below, scratch, Imm32(amount), &trap);
    subPtr(Imm32(amount), scratch);
    branchPtr(Assembler::Below,
              Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)), scratch,
              &ok);

    bind(&trap);
    wasmTrap(wasm::Trap::StackOverflow, trapOffset);
    CodeOffset trapInsnOffset = CodeOffset(currentOffset());

    bind(&ok);
    reserveStack(amount);
    return std::pair<CodeOffset, uint32_t>(trapInsnOffset, 0);
  }

  // Small frame: reserve first, then check sp against the stack limit.
  reserveStack(amount);
  Label ok;
  branchStackPtrRhs(Assembler::Below,
                    Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)),
                    &ok);
  wasmTrap(wasm::Trap::StackOverflow, trapOffset);
  CodeOffset trapInsnOffset = CodeOffset(currentOffset());
  bind(&ok);
  return std::pair<CodeOffset, uint32_t>(trapInsnOffset, amount);
}
3766
// Call an imported wasm function described by |callee|. Saves the caller's
// TLS at its canonical stack slot, switches to the callee's realm, TLS and
// pinned registers, and calls through the import's code pointer. Returns the
// CodeOffset of the call instruction.
CodeOffset MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc,
                                          const wasm::CalleeDesc& callee) {
  storePtr(WasmTlsReg,
           Address(getStackPointer(), WasmCallerTLSOffsetBeforeCall));

  // Load the callee, before the caller's registers are clobbered.
  uint32_t globalDataOffset = callee.importGlobalDataOffset();
  loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, code),
                    ABINonArgReg0);

#ifndef JS_CODEGEN_NONE
  static_assert(ABINonArgReg0 != WasmTlsReg, "by constraint");
#endif

  // Switch to the callee's realm.
  loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, realm),
                    ABINonArgReg1);
  loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), ABINonArgReg2);
  storePtr(ABINonArgReg1, Address(ABINonArgReg2, JSContext::offsetOfRealm()));

  // Switch to the callee's TLS and pinned registers and make the call.
  loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, tls),
                    WasmTlsReg);

  storePtr(WasmTlsReg,
           Address(getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
  loadWasmPinnedRegsFromTls();

  return call(desc, ABINonArgReg0);
}
3797
// Call a builtin instance method |builtin|, passing the wasm Instance
// pointer (loaded from TLS) in the ABI location |instanceArg|. After the
// call, the return value is checked according to |failureMode| and a
// ThrowReported trap is emitted on failure. Returns the CodeOffset of the
// call instruction.
CodeOffset MacroAssembler::wasmCallBuiltinInstanceMethod(
    const wasm::CallSiteDesc& desc, const ABIArg& instanceArg,
    wasm::SymbolicAddress builtin, wasm::FailureMode failureMode) {
  MOZ_ASSERT(instanceArg != ABIArg());

  // Caller and callee TLS are the same for a builtin call; store both.
  storePtr(WasmTlsReg,
           Address(getStackPointer(), WasmCallerTLSOffsetBeforeCall));
  storePtr(WasmTlsReg,
           Address(getStackPointer(), WasmCalleeTLSOffsetBeforeCall));

  if (instanceArg.kind() == ABIArg::GPR) {
    loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)),
            instanceArg.gpr());
  } else if (instanceArg.kind() == ABIArg::Stack) {
    // Safe to use ABINonArgReg0 since it's the last thing before the call.
    Register scratch = ABINonArgReg0;
    loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), scratch);
    storePtr(scratch,
             Address(getStackPointer(), instanceArg.offsetFromArgBase()));
  } else {
    MOZ_CRASH("Unknown abi passing style for pointer");
  }

  CodeOffset ret = call(desc, builtin);

  if (failureMode != wasm::FailureMode::Infallible) {
    // Branch around the trap when the builtin's return value signals success.
    Label noTrap;
    switch (failureMode) {
      case wasm::FailureMode::Infallible:
        MOZ_CRASH();
      case wasm::FailureMode::FailOnNegI32:
        branchTest32(Assembler::NotSigned, ReturnReg, ReturnReg, &noTrap);
        break;
      case wasm::FailureMode::FailOnNullPtr:
        branchTestPtr(Assembler::NonZero, ReturnReg, ReturnReg, &noTrap);
        break;
      case wasm::FailureMode::FailOnInvalidRef:
        branchPtr(Assembler::NotEqual, ReturnReg,
                  ImmWord(uintptr_t(wasm::AnyRef::invalid().forCompiledCode())),
                  &noTrap);
        break;
    }
    wasmTrap(wasm::Trap::ThrowReported,
             wasm::BytecodeOffset(desc.lineOrBytecode()));
    bind(&noTrap);
  }

  return ret;
}
3847
// Emit an indirect call through a function table.
//
// For asm.js tables no signature or bounds check is needed (the index has
// already been masked into range) and the TLS does not change. For wasm
// tables this loads the expected signature id into WasmTableCallSigReg,
// optionally bounds-checks the index (trapping with OutOfBounds), traps with
// IndirectCallToNull on a null entry, switches to the callee's TLS, pinned
// registers and realm, and calls the entry's code pointer. The index must be
// in WasmTableCallIndexReg and may be clobbered. Returns the CodeOffset of
// the call instruction.
CodeOffset MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc,
                                            const wasm::CalleeDesc& callee,
                                            bool needsBoundsCheck) {
  Register scratch = WasmTableCallScratchReg0;
  Register index = WasmTableCallIndexReg;

  // Optimization opportunity: when offsetof(FunctionTableElem, code) == 0, as
  // it is at present, we can probably generate better code here by folding
  // the address computation into the load.

  static_assert(sizeof(wasm::FunctionTableElem) == 8 ||
                    sizeof(wasm::FunctionTableElem) == 16,
                "elements of function tables are two words");

  if (callee.which() == wasm::CalleeDesc::AsmJSTable) {
    // asm.js tables require no signature check, and have had their index
    // masked into range and thus need no bounds check.
    loadWasmGlobalPtr(callee.tableFunctionBaseGlobalDataOffset(), scratch);
    if (sizeof(wasm::FunctionTableElem) == 8) {
      computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
    } else {
      // 16-byte elements: scale the index by hand.
      lshift32(Imm32(4), index);
      addPtr(index, scratch);
    }
    loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, code)), scratch);
    storePtr(WasmTlsReg,
             Address(getStackPointer(), WasmCallerTLSOffsetBeforeCall));
    storePtr(WasmTlsReg,
             Address(getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
    return call(desc, scratch);
  }

  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);

  // Write the functype-id into the ABI functype-id register.
  wasm::TypeIdDesc funcTypeId = callee.wasmTableSigId();
  switch (funcTypeId.kind()) {
    case wasm::TypeIdDescKind::Global:
      loadWasmGlobalPtr(funcTypeId.globalDataOffset(), WasmTableCallSigReg);
      break;
    case wasm::TypeIdDescKind::Immediate:
      move32(Imm32(funcTypeId.immediate()), WasmTableCallSigReg);
      break;
    case wasm::TypeIdDescKind::None:
      break;
  }

  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());

  // WebAssembly throws if the index is out-of-bounds.
  if (needsBoundsCheck) {
    loadWasmGlobalPtr(callee.tableLengthGlobalDataOffset(), scratch);

    Label ok;
    branch32(Assembler::Condition::Below, index, scratch, &ok);
    wasmTrap(wasm::Trap::OutOfBounds, trapOffset);
    bind(&ok);
  }

  // Load the base pointer of the table.
  loadWasmGlobalPtr(callee.tableFunctionBaseGlobalDataOffset(), scratch);

  // Load the callee from the table.
  if (sizeof(wasm::FunctionTableElem) == 8) {
    computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
  } else {
    // 16-byte elements: scale the index by hand.
    lshift32(Imm32(4), index);
    addPtr(index, scratch);
  }

  // Save the caller's TLS and install the callee's from the table entry.
  storePtr(WasmTlsReg,
           Address(getStackPointer(), WasmCallerTLSOffsetBeforeCall));
  loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, tls)), WasmTlsReg);
  storePtr(WasmTlsReg,
           Address(getStackPointer(), WasmCalleeTLSOffsetBeforeCall));

  // A null TLS in the table entry means an uninitialized (null) entry.
  Label nonNull;
  branchTest32(Assembler::NonZero, WasmTlsReg, WasmTlsReg, &nonNull);
  wasmTrap(wasm::Trap::IndirectCallToNull, trapOffset);
  bind(&nonNull);

  loadWasmPinnedRegsFromTls();
  switchToWasmTlsRealm(index, WasmTableCallScratchReg1);

  loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, code)), scratch);

  return call(desc, scratch);
}
3936
// Emit a nop sequence that can later be patched into a call (see the
// argument-less nopPatchableToCall overload) and record its offset under
// |desc| for the patching machinery.
void MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc) {
  CodeOffset offset = nopPatchableToCall();
  append(desc, offset);
}
3941
// Fast path of the GC pre-write barrier.
//
// The address of the slot being overwritten is in PreBarrierReg. Loads the
// GC thing it refers to, then jumps to |noBarrier| when no barrier is needed:
// the cell is in the nursery (Values/Objects/Strings only), belongs to a
// different runtime (Values/Strings only), or its black mark bit is already
// set. Falls through when the barrier must be taken. Clobbers temp1..temp3;
// on x86/x64 |temp3| must be ecx/rcx because it is used as the shift count
// (asserted below).
void MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type,
                                            Register temp1, Register temp2,
                                            Register temp3, Label* noBarrier) {
  MOZ_ASSERT(temp1 != PreBarrierReg);
  MOZ_ASSERT(temp2 != PreBarrierReg);
  MOZ_ASSERT(temp3 != PreBarrierReg);

  // Load the GC thing in temp1.
  if (type == MIRType::Value) {
    unboxGCThingForGCBarrier(Address(PreBarrierReg, 0), temp1);
  } else {
    MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
               type == MIRType::Shape);
    loadPtr(Address(PreBarrierReg, 0), temp1);
  }

#ifdef DEBUG
  // The caller should have checked for null pointers.
  Label nonZero;
  branchTestPtr(Assembler::NonZero, temp1, temp1, &nonZero);
  assumeUnreachable("JIT pre-barrier: unexpected nullptr");
  bind(&nonZero);
#endif

  // Load the chunk address in temp2 by masking off the low chunk bits.
  movePtr(ImmWord(~gc::ChunkMask), temp2);
  andPtr(temp1, temp2);

  // If the GC thing is in the nursery, we don't need to barrier it.
  if (type == MIRType::Value || type == MIRType::Object ||
      type == MIRType::String) {
    // Nursery chunks have a non-null store buffer pointer.
    branchPtr(Assembler::NotEqual, Address(temp2, gc::ChunkStoreBufferOffset),
              ImmWord(0), noBarrier);
  } else {
#ifdef DEBUG
    // Shapes are always tenured; check that in DEBUG builds.
    Label isTenured;
    branchPtr(Assembler::Equal, Address(temp2, gc::ChunkStoreBufferOffset),
              ImmWord(0), &isTenured);
    assumeUnreachable("JIT pre-barrier: unexpected nursery pointer");
    bind(&isTenured);
#endif
  }

  // If it's a permanent atom or symbol from a parent runtime we don't
  // need to barrier it.
  if (type == MIRType::Value || type == MIRType::String) {
    branchPtr(Assembler::NotEqual, Address(temp2, gc::ChunkRuntimeOffset),
              ImmPtr(rt), noBarrier);
  } else {
#ifdef DEBUG
    Label thisRuntime;
    branchPtr(Assembler::Equal, Address(temp2, gc::ChunkRuntimeOffset),
              ImmPtr(rt), &thisRuntime);
    assumeUnreachable("JIT pre-barrier: unexpected runtime");
    bind(&thisRuntime);
#endif
  }

  // Determine the bit index and store in temp1.
  //
  // bit = (addr & js::gc::ChunkMask) / js::gc::CellBytesPerMarkBit +
  //        static_cast<uint32_t>(colorBit);
  static_assert(gc::CellBytesPerMarkBit == 8,
                "Calculation below relies on this");
  static_assert(size_t(gc::ColorBit::BlackBit) == 0,
                "Calculation below relies on this");
  andPtr(Imm32(gc::ChunkMask), temp1);
  rshiftPtr(Imm32(3), temp1);

  static_assert(gc::MarkBitmapWordBits == JS_BITS_PER_WORD,
                "Calculation below relies on this");

  // Load the bitmap word in temp2.
  //
  // word = chunk.bitmap[bit / MarkBitmapWordBits];

  // Fold the adjustment for the fact that arenas don't start at the beginning
  // of the chunk into the offset to the chunk bitmap.
  const size_t firstArenaAdjustment = gc::FirstArenaAdjustmentBits / CHAR_BIT;
  const intptr_t offset =
      intptr_t(gc::ChunkMarkBitmapOffset) - intptr_t(firstArenaAdjustment);

  // Keep a copy of the bit index in temp3 for the mask computation below.
  movePtr(temp1, temp3);
#if JS_BITS_PER_WORD == 64
  rshiftPtr(Imm32(6), temp1);
  loadPtr(BaseIndex(temp2, temp1, TimesEight, offset), temp2);
#else
  rshiftPtr(Imm32(5), temp1);
  loadPtr(BaseIndex(temp2, temp1, TimesFour, offset), temp2);
#endif

  // Load the mask in temp1.
  //
  // mask = uintptr_t(1) << (bit % MarkBitmapWordBits);
  andPtr(Imm32(gc::MarkBitmapWordBits - 1), temp3);
  move32(Imm32(1), temp1);
#ifdef JS_CODEGEN_X64
  MOZ_ASSERT(temp3 == rcx);
  shlq_cl(temp1);
#elif JS_CODEGEN_X86
  MOZ_ASSERT(temp3 == ecx);
  shll_cl(temp1);
#elif JS_CODEGEN_ARM
  ma_lsl(temp3, temp1, temp1);
#elif JS_CODEGEN_ARM64
  Lsl(ARMRegister(temp1, 64), ARMRegister(temp1, 64), ARMRegister(temp3, 64));
#elif JS_CODEGEN_MIPS32
  ma_sll(temp1, temp1, temp3);
#elif JS_CODEGEN_MIPS64
  ma_dsll(temp1, temp1, temp3);
#elif JS_CODEGEN_NONE
  MOZ_CRASH();
#else
#  error "Unknown architecture"
#endif

  // No barrier is needed if the bit is set, |word & mask != 0|.
  branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
}
4061
4062 // ========================================================================
4063 // JS atomic operations.
4064
// Set |output| to 1 if |value| is a byte size for which JS atomics are
// lock-free (1, 2, 4 or 8), and to 0 otherwise.
void MacroAssembler::atomicIsLockFreeJS(Register value, Register output) {
  // Keep this in sync with isLockfreeJS() in jit/AtomicOperations.h.
  static_assert(AtomicOperations::isLockfreeJS(1));  // Implementation artifact
  static_assert(AtomicOperations::isLockfreeJS(2));  // Implementation artifact
  static_assert(AtomicOperations::isLockfreeJS(4));  // Spec requirement
  static_assert(AtomicOperations::isLockfreeJS(8));  // Implementation artifact

  // Assume lock-free, then clear |output| if no size matched.
  Label done;
  move32(Imm32(1), output);
  branch32(Assembler::Equal, value, Imm32(8), &done);
  branch32(Assembler::Equal, value, Imm32(4), &done);
  branch32(Assembler::Equal, value, Imm32(2), &done);
  branch32(Assembler::Equal, value, Imm32(1), &done);
  move32(Imm32(0), output);
  bind(&done);
}
4081
4082 // ========================================================================
4083 // Spectre Mitigations.
4084
// Spectre mitigation: set |output| to |index| if |index| < |length|, and to
// 0 otherwise, so a misspeculated bounds check can't read out-of-bounds.
void MacroAssembler::spectreMaskIndex32(Register index, Register length,
                                        Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(length != output);
  MOZ_ASSERT(index != output);

  move32(Imm32(0), output);
  cmp32Move32(Assembler::Below, index, length, index, output);
}
4094
// Spectre mitigation: set |output| to |index| if |index| < the 32-bit length
// stored at |length|, and to 0 otherwise.
void MacroAssembler::spectreMaskIndex32(Register index, const Address& length,
                                        Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != output);
  MOZ_ASSERT(index != output);

  move32(Imm32(0), output);
  cmp32Move32(Assembler::Below, index, length, index, output);
}
4105
// Pointer-width variant of spectreMaskIndex32: set |output| to |index| if
// |index| < |length|, and to 0 otherwise.
void MacroAssembler::spectreMaskIndexPtr(Register index, Register length,
                                         Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(length != output);
  MOZ_ASSERT(index != output);

  movePtr(ImmWord(0), output);
  cmpPtrMovePtr(Assembler::Below, index, length, index, output);
}
4115
// Pointer-width variant of spectreMaskIndex32: set |output| to |index| if
// |index| < the pointer-sized length stored at |length|, and to 0 otherwise.
void MacroAssembler::spectreMaskIndexPtr(Register index, const Address& length,
                                         Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != output);
  MOZ_ASSERT(index != output);

  movePtr(ImmWord(0), output);
  cmpPtrMovePtr(Assembler::Below, index, length, index, output);
}
4126
// Branch to |failure| if |index| >= |length|, where |length| must be a power
// of two. When Spectre index masking is enabled, additionally mask |index|
// in place so a misspeculated check can't use an out-of-range index.
void MacroAssembler::boundsCheck32PowerOfTwo(Register index, uint32_t length,
                                             Label* failure) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(length));
  branch32(Assembler::AboveOrEqual, index, Imm32(length), failure);

  // Note: it's fine to clobber the input register, as this is a no-op: it
  // only affects speculative execution.
  if (JitOptions.spectreIndexMasking) {
    and32(Imm32(length - 1), index);
  }
}
4138
4139 //}}} check_macroassembler_style
4140
// Emit the memory barrier required before an access with synchronization
// |sync|.
void MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
  memoryBarrier(sync.barrierBefore);
}
4144
// Emit the memory barrier required after an access with synchronization
// |sync|.
void MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
  memoryBarrier(sync.barrierAfter);
}
4148
// DEBUG-only: crash (via assumeUnreachable) if |val| is not an object value.
// No code is emitted in release builds.
void MacroAssembler::debugAssertIsObject(const ValueOperand& val) {
#ifdef DEBUG
  Label ok;
  branchTestObject(Assembler::Equal, val, &ok);
  assumeUnreachable("Expected an object!");
  bind(&ok);
#endif
}
4157
// DEBUG-only: crash (via assumeUnreachable) if |obj|'s shape reports zero
// fixed slots. Clobbers |scratch|. No code is emitted in release builds.
void MacroAssembler::debugAssertObjHasFixedSlots(Register obj,
                                                 Register scratch) {
#ifdef DEBUG
  Label hasFixedSlots;
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  branchTest32(Assembler::NonZero,
               Address(scratch, Shape::offsetOfImmutableFlags()),
               Imm32(Shape::fixedSlotsMask()), &hasFixedSlots);
  assumeUnreachable("Expected a fixed slot");
  bind(&hasFixedSlots);
#endif
}
4170
branchArrayIsNotPacked(Register array,Register temp1,Register temp2,Label * label)4171 void MacroAssembler::branchArrayIsNotPacked(Register array, Register temp1,
4172 Register temp2, Label* label) {
4173 loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);
4174
4175 // Test length == initializedLength.
4176 Label done;
4177 Address initLength(temp1, ObjectElements::offsetOfInitializedLength());
4178 load32(Address(temp1, ObjectElements::offsetOfLength()), temp2);
4179 branch32(Assembler::NotEqual, initLength, temp2, label);
4180
4181 // Test the NON_PACKED flag.
4182 Address flags(temp1, ObjectElements::offsetOfFlags());
4183 branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::NON_PACKED),
4184 label);
4185 }
4186
// Set |output| to 1 if |obj| is a packed ArrayObject and to 0 otherwise.
// Clobbers |temp|; |obj| may be clobbered by the class check's Spectre
// mitigation (it is passed as the spectre scratch register).
void MacroAssembler::setIsPackedArray(Register obj, Register output,
                                      Register temp) {
  // Ensure it's an ArrayObject.
  Label notPackedArray;
  branchTestObjClass(Assembler::NotEqual, obj, &ArrayObject::class_, temp, obj,
                     &notPackedArray);

  branchArrayIsNotPacked(obj, temp, output, &notPackedArray);

  Label done;
  move32(Imm32(1), output);
  jump(&done);

  bind(&notPackedArray);
  move32(Imm32(0), output);

  bind(&done);
}
4205
// Inline Array.prototype.pop for a packed array: remove the last element of
// |array| and store it in |output| (|undefined| when the array is empty).
//
// Jumps to |fail| when the elements have any flag we can't handle inline
// (non-packed, non-writable length, non-extensible, possibly being iterated)
// or when length != initializedLength. Clobbers |temp1| and |temp2|.
void MacroAssembler::packedArrayPop(Register array, ValueOperand output,
                                    Register temp1, Register temp2,
                                    Label* fail) {
  // Load obj->elements in temp1.
  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);

  // Check flags.
  static constexpr uint32_t UnhandledFlags =
      ObjectElements::Flags::NON_PACKED |
      ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
      ObjectElements::Flags::NOT_EXTENSIBLE |
      ObjectElements::Flags::MAYBE_IN_ITERATION;
  Address flags(temp1, ObjectElements::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);

  // Load length in temp2. Ensure length == initializedLength.
  Address lengthAddr(temp1, ObjectElements::offsetOfLength());
  Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
  load32(lengthAddr, temp2);
  branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);

  // Result is |undefined| if length == 0.
  Label notEmpty, done;
  branchTest32(Assembler::NonZero, temp2, temp2, &notEmpty);
  {
    moveValue(UndefinedValue(), output);
    jump(&done);
  }

  bind(&notEmpty);

  // Load the last element (index length - 1).
  sub32(Imm32(1), temp2);
  BaseObjectElementIndex elementAddr(temp1, temp2);
  loadValue(elementAddr, output);

  // Pre-barrier the element because we're removing it from the array.
  EmitPreBarrier(*this, elementAddr, MIRType::Value);

  // Update length and initializedLength.
  store32(temp2, lengthAddr);
  store32(temp2, initLengthAddr);

  bind(&done);
}
4251
packedArrayShift(Register array,ValueOperand output,Register temp1,Register temp2,LiveRegisterSet volatileRegs,Label * fail)4252 void MacroAssembler::packedArrayShift(Register array, ValueOperand output,
4253 Register temp1, Register temp2,
4254 LiveRegisterSet volatileRegs,
4255 Label* fail) {
4256 // Load obj->elements in temp1.
4257 loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);
4258
4259 // Check flags.
4260 static constexpr uint32_t UnhandledFlags =
4261 ObjectElements::Flags::NON_PACKED |
4262 ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH |
4263 ObjectElements::Flags::NOT_EXTENSIBLE |
4264 ObjectElements::Flags::MAYBE_IN_ITERATION;
4265 Address flags(temp1, ObjectElements::offsetOfFlags());
4266 branchTest32(Assembler::NonZero, flags, Imm32(UnhandledFlags), fail);
4267
4268 // Load length in temp2. Ensure length == initializedLength.
4269 Address lengthAddr(temp1, ObjectElements::offsetOfLength());
4270 Address initLengthAddr(temp1, ObjectElements::offsetOfInitializedLength());
4271 load32(lengthAddr, temp2);
4272 branch32(Assembler::NotEqual, initLengthAddr, temp2, fail);
4273
4274 // Result is |undefined| if length == 0.
4275 Label notEmpty, done;
4276 branchTest32(Assembler::NonZero, temp2, temp2, ¬Empty);
4277 {
4278 moveValue(UndefinedValue(), output);
4279 jump(&done);
4280 }
4281
4282 bind(¬Empty);
4283
4284 // Load the first element.
4285 Address elementAddr(temp1, 0);
4286 loadValue(elementAddr, output);
4287
4288 // Pre-barrier the element because we're removing it from the array.
4289 EmitPreBarrier(*this, elementAddr, MIRType::Value);
4290
4291 // Move the other elements.
4292 {
4293 // Ensure output and temp2 are in volatileRegs. Don't preserve temp1.
4294 volatileRegs.takeUnchecked(temp1);
4295 if (output.hasVolatileReg()) {
4296 volatileRegs.addUnchecked(output);
4297 }
4298 if (temp2.volatile_()) {
4299 volatileRegs.addUnchecked(temp2);
4300 }
4301
4302 PushRegsInMask(volatileRegs);
4303
4304 using Fn = void (*)(ArrayObject * arr);
4305 setupUnalignedABICall(temp1);
4306 passABIArg(array);
4307 callWithABI<Fn, ArrayShiftMoveElements>();
4308
4309 PopRegsInMask(volatileRegs);
4310
4311 // Reload the elements. The call may have updated it.
4312 loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);
4313 }
4314
4315 // Update length and initializedLength.
4316 sub32(Imm32(1), temp2);
4317 store32(temp2, lengthAddr);
4318 store32(temp2, initLengthAddr);
4319
4320 bind(&done);
4321 }
4322
// Load the |index|-th element of the arguments object |obj| into |output|.
// Jumps to |fail| when any element has been overridden, when |index| is out
// of bounds, or when the slot holds a FORWARD_TO_CALL_SLOT MagicValue
// (the argument lives in the call object instead). Clobbers |temp| and
// |output|'s scratch register.
void MacroAssembler::loadArgumentsObjectElement(Register obj, Register index,
                                                ValueOperand output,
                                                Register temp, Label* fail) {
  Register temp2 = output.scratchReg();

  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Ensure no overridden elements.
  branchTest32(Assembler::NonZero, temp,
               Imm32(ArgumentsObject::ELEMENT_OVERRIDDEN_BIT), fail);

  // Bounds check. The slot packs the length above PACKED_BITS_COUNT flag
  // bits, so shift the flags out first.
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), temp);
  spectreBoundsCheck32(index, temp, temp2, fail);

  // Load ArgumentsData.
  loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()), temp);

  // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
  BaseValueIndex argValue(temp, index, ArgumentsData::offsetOfArgs());
  branchTestMagic(Assembler::Equal, argValue, fail);
  loadValue(argValue, output);
}
4347
// Load the length of the arguments object |obj| into |output|. Jumps to
// |fail| when the length property has been overridden.
void MacroAssembler::loadArgumentsObjectLength(Register obj, Register output,
                                               Label* fail) {
  // Get initial length value.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()),
             output);

  // Test if length has been overridden.
  branchTest32(Assembler::NonZero, output,
               Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT), fail);

  // Shift out arguments length and return it. The slot packs the length
  // above PACKED_BITS_COUNT flag bits.
  rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), output);
}
4361
// Branch to |label| when testing the arguments object's packed flag bits
// against |flags| satisfies |cond|. |flags| may only contain bits from
// ArgumentsObject::PACKED_BITS_MASK. Clobbers |temp|.
void MacroAssembler::branchTestArgumentsObjectFlags(Register obj, Register temp,
                                                    uint32_t flags,
                                                    Condition cond,
                                                    Label* label) {
  MOZ_ASSERT((flags & ~ArgumentsObject::PACKED_BITS_MASK) == 0);

  // Get initial length value. The flag bits live in the low bits of this
  // packed int32 slot.
  unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), temp);

  // Test flags.
  branchTest32(cond, temp, Imm32(flags), label);
}
4374
ValidateSizeRange(Scalar::Type from,Scalar::Type to)4375 static constexpr bool ValidateSizeRange(Scalar::Type from, Scalar::Type to) {
4376 for (Scalar::Type type = from; type < to; type = Scalar::Type(type + 1)) {
4377 if (TypedArrayElemSize(type) != TypedArrayElemSize(from)) {
4378 return false;
4379 }
4380 }
4381 return true;
4382 }
4383
// Load the element size in bytes of the typed array |obj| into |output|.
//
// Typed array JSClasses are laid out contiguously in memory (Int8 first,
// BigUint64 last — see the static_asserts), so ranges of classes sharing an
// element size can be selected with unsigned pointer comparisons against the
// class pointer, no table lookup needed. |output| is first clobbered with the
// object's class pointer, then overwritten with the result (1, 2, 4, or 8).
void MacroAssembler::typedArrayElementSize(Register obj, Register output) {
  static_assert(Scalar::Int8 == 0, "Int8 is the first typed array class");
  static_assert(
      (Scalar::BigUint64 - Scalar::Int8) == Scalar::MaxTypedArrayViewType - 1,
      "BigUint64 is the last typed array class");

  Label one, two, four, eight, done;

  loadObjClassUnsafe(obj, output);

  static_assert(ValidateSizeRange(Scalar::Int8, Scalar::Int16),
                "element size is one in [Int8, Int16)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Int16)), &one);

  static_assert(ValidateSizeRange(Scalar::Int16, Scalar::Int32),
                "element size is two in [Int16, Int32)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Int32)), &two);

  static_assert(ValidateSizeRange(Scalar::Int32, Scalar::Float64),
                "element size is four in [Int32, Float64)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Float64)), &four);

  static_assert(ValidateSizeRange(Scalar::Float64, Scalar::Uint8Clamped),
                "element size is eight in [Float64, Uint8Clamped)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::Uint8Clamped)),
            &eight);

  // Uint8Clamped sorts after the eight-byte float range, so it needs its own
  // comparison to route back to the one-byte case.
  static_assert(ValidateSizeRange(Scalar::Uint8Clamped, Scalar::BigInt64),
                "element size is one in [Uint8Clamped, BigInt64)");
  branchPtr(Assembler::Below, output,
            ImmPtr(TypedArrayObject::classForType(Scalar::BigInt64)), &one);

  static_assert(
      ValidateSizeRange(Scalar::BigInt64, Scalar::MaxTypedArrayViewType),
      "element size is eight in [BigInt64, MaxTypedArrayViewType)");
  // Fall through for BigInt64 and BigUint64

  bind(&eight);
  move32(Imm32(8), output);
  jump(&done);

  bind(&four);
  move32(Imm32(4), output);
  jump(&done);

  bind(&two);
  move32(Imm32(2), output);
  jump(&done);

  bind(&one);
  move32(Imm32(1), output);

  bind(&done);
}
4442
branchIfClassIsNotTypedArray(Register clasp,Label * notTypedArray)4443 void MacroAssembler::branchIfClassIsNotTypedArray(Register clasp,
4444 Label* notTypedArray) {
4445 static_assert(Scalar::Int8 == 0, "Int8 is the first typed array class");
4446 const JSClass* firstTypedArrayClass =
4447 TypedArrayObject::classForType(Scalar::Int8);
4448
4449 static_assert(
4450 (Scalar::BigUint64 - Scalar::Int8) == Scalar::MaxTypedArrayViewType - 1,
4451 "BigUint64 is the last typed array class");
4452 const JSClass* lastTypedArrayClass =
4453 TypedArrayObject::classForType(Scalar::BigUint64);
4454
4455 branchPtr(Assembler::Below, clasp, ImmPtr(firstTypedArrayClass),
4456 notTypedArray);
4457 branchPtr(Assembler::Above, clasp, ImmPtr(lastTypedArrayClass),
4458 notTypedArray);
4459 }
4460
// Branch to |label| when the ArrayBufferView |obj| is backed by a detached
// ArrayBuffer. Clobbers |temp|. Falls through (no branch) for shared buffers
// and views that never had a buffer exposed.
void MacroAssembler::branchIfHasDetachedArrayBuffer(Register obj, Register temp,
                                                    Label* label) {
  // Inline implementation of ArrayBufferViewObject::hasDetachedBuffer().

  // Load obj->elements in temp.
  loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

  // Shared buffers can't be detached.
  Label done;
  branchTest32(Assembler::NonZero,
               Address(temp, ObjectElements::offsetOfFlags()),
               Imm32(ObjectElements::SHARED_MEMORY), &done);

  // An ArrayBufferView with a null buffer has never had its buffer exposed to
  // become detached.
  fallibleUnboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), temp,
                      &done);

  // Load the ArrayBuffer flags and branch if the detached flag is set.
  unboxInt32(Address(temp, ArrayBufferObject::offsetOfFlagsSlot()), temp);
  branchTest32(Assembler::NonZero, temp, Imm32(ArrayBufferObject::DETACHED),
               label);

  bind(&done);
}
4486
// Branch to |notReusable| when the NativeIterator |ni| can't be reused for a
// new for-in loop. In debug builds, first assert the iterator has finished
// initialization.
void MacroAssembler::branchIfNativeIteratorNotReusable(Register ni,
                                                       Label* notReusable) {
  // See NativeIterator::isReusable.
  Address flagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());

#ifdef DEBUG
  Label niIsInitialized;
  branchTest32(Assembler::NonZero, flagsAddr,
               Imm32(NativeIterator::Flags::Initialized), &niIsInitialized);
  assumeUnreachable(
      "Expected a NativeIterator that's been completely "
      "initialized");
  bind(&niIsInitialized);
#endif

  branchTest32(Assembler::NonZero, flagsAddr,
               Imm32(NativeIterator::Flags::NotReusable), notReusable);
}
4505
// Load the NativeIterator* stored in the PropertyIteratorObject |obj| into
// |dest|. In debug builds, first assert that |obj| really is a
// PropertyIteratorObject (the class check may clobber |dest| as scratch
// before the final load overwrites it).
static void LoadNativeIterator(MacroAssembler& masm, Register obj,
                               Register dest) {
  MOZ_ASSERT(obj != dest);

#ifdef DEBUG
  // Assert we have a PropertyIteratorObject.
  Label ok;
  masm.branchTestObjClass(Assembler::Equal, obj,
                          &PropertyIteratorObject::class_, dest, obj, &ok);
  masm.assumeUnreachable("Expected PropertyIteratorObject!");
  masm.bind(&ok);
#endif

  // Load NativeIterator object.
  masm.loadObjPrivate(obj, PropertyIteratorObject::NUM_FIXED_SLOTS, dest);
}
4522
// Advance the for-in iterator |obj| by one step: |output| receives the next
// property name as a string value, or MagicValue(JS_NO_ITER_VALUE) when the
// iterator is exhausted. Clobbers |temp| and |output|'s scratch register.
void MacroAssembler::iteratorMore(Register obj, ValueOperand output,
                                  Register temp) {
  Label done;
  Register outputScratch = output.scratchReg();
  LoadNativeIterator(*this, obj, outputScratch);

  // If propertyCursor_ < propertiesEnd_, load the next string and advance
  // the cursor. Otherwise return MagicValue(JS_NO_ITER_VALUE).
  Label iterDone;
  Address cursorAddr(outputScratch, NativeIterator::offsetOfPropertyCursor());
  Address cursorEndAddr(outputScratch, NativeIterator::offsetOfPropertiesEnd());
  loadPtr(cursorAddr, temp);
  branchPtr(Assembler::BelowOrEqual, cursorEndAddr, temp, &iterDone);

  // Get next string.
  loadPtr(Address(temp, 0), temp);

  // Increase the cursor.
  addPtr(Imm32(sizeof(GCPtrLinearString)), cursorAddr);

  // Box the string pointer as a Value.
  tagValue(JSVAL_TYPE_STRING, temp, output);
  jump(&done);

  bind(&iterDone);
  moveValue(MagicValue(JS_NO_ITER_VALUE), output);

  bind(&done);
}
4551
// Close the for-in iterator |obj|: clear its active bit, rewind the property
// cursor, and unlink its NativeIterator from the compartment's doubly-linked
// iterator list. Clobbers all three temps.
void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
                                   Register temp3) {
  LoadNativeIterator(*this, obj, temp1);

  // Clear active bit.
  and32(Imm32(~NativeIterator::Flags::Active),
        Address(temp1, NativeIterator::offsetOfFlagsAndCount()));

  // Reset property cursor. Properties start right after the shapes, so the
  // shapes-end pointer is the initial cursor position.
  loadPtr(Address(temp1, NativeIterator::offsetOfShapesEnd()), temp2);
  storePtr(temp2, Address(temp1, NativeIterator::offsetOfPropertyCursor()));

  // Unlink from the iterator list.
  const Register next = temp2;
  const Register prev = temp3;
  loadPtr(Address(temp1, NativeIterator::offsetOfNext()), next);
  loadPtr(Address(temp1, NativeIterator::offsetOfPrev()), prev);
  storePtr(prev, Address(next, NativeIterator::offsetOfPrev()));
  storePtr(next, Address(prev, NativeIterator::offsetOfNext()));
#ifdef DEBUG
  // Poison the unlinked node's links so stale use is caught in debug builds.
  storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfNext()));
  storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfPrev()));
#endif
}
4576
4577 // Can't push large frames blindly on windows, so we must touch frame memory
4578 // incrementally, with no more than 4096 - 1 bytes between touches.
4579 //
4580 // This is used across all platforms for simplicity.
touchFrameValues(Register numStackValues,Register scratch1,Register scratch2)4581 void MacroAssembler::touchFrameValues(Register numStackValues,
4582 Register scratch1, Register scratch2) {
4583 const size_t FRAME_TOUCH_INCREMENT = 2048;
4584 static_assert(FRAME_TOUCH_INCREMENT < 4096 - 1,
4585 "Frame increment is too large");
4586
4587 moveStackPtrTo(scratch2);
4588 mov(numStackValues, scratch1);
4589 lshiftPtr(Imm32(3), scratch1);
4590 subPtr(scratch1, scratch2);
4591 {
4592 moveStackPtrTo(scratch1);
4593 subPtr(Imm32(FRAME_TOUCH_INCREMENT), scratch1);
4594
4595 Label touchFrameLoop;
4596 Label touchFrameLoopEnd;
4597 bind(&touchFrameLoop);
4598 branchPtr(Assembler::Below, scratch1, scratch2, &touchFrameLoopEnd);
4599 store32(Imm32(0), Address(scratch1, 0));
4600 subPtr(Imm32(FRAME_TOUCH_INCREMENT), scratch1);
4601 jump(&touchFrameLoop);
4602 bind(&touchFrameLoopEnd);
4603 }
4604 }
4605
4606 namespace js {
4607 namespace jit {
4608
4609 #ifdef DEBUG
// Debug-only RAII scope: records |reg| in the assembler's
// debugTrackedRegisters_ set for the scope's lifetime so conflicting uses of
// the same register can be asserted against elsewhere.
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(
    MacroAssembler& masm, RegisterType reg)
    : RegisterType(reg), masm_(masm), released_(false) {
  masm.debugTrackedRegisters_.add(reg);
}

template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(
    MacroAssembler& masm, Register reg);
template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(
    MacroAssembler& masm, FloatRegister reg);
4621 #endif // DEBUG
4622
4623 #ifdef DEBUG
// Release the tracked register on scope exit, unless release() was already
// called explicitly.
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope() {
  if (!released_) {
    release();
  }
}

template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();
4633
// Stop tracking the register before the scope ends. Must not be called twice
// without an intervening reacquire().
template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::release() {
  MOZ_ASSERT(!released_);
  released_ = true;
  // The scope derives from RegisterType (see the constructor), so this cast
  // recovers the wrapped register from |this|.
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.take(reg);
}

template void AutoGenericRegisterScope<Register>::release();
template void AutoGenericRegisterScope<FloatRegister>::release();
4644
// Resume tracking a previously release()d register for the remainder of the
// scope. Must only be called after release().
template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::reacquire() {
  MOZ_ASSERT(released_);
  released_ = false;
  // The scope derives from RegisterType (see the constructor), so this cast
  // recovers the wrapped register from |this|.
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.add(reg);
}

template void AutoGenericRegisterScope<Register>::reacquire();
template void AutoGenericRegisterScope<FloatRegister>::reacquire();
4655
4656 #endif // DEBUG
4657
4658 } // namespace jit
4659
4660 namespace wasm {
ExtractCallerTlsFromFrameWithTls(const Frame * fp)4661 const TlsData* ExtractCallerTlsFromFrameWithTls(const Frame* fp) {
4662 return *reinterpret_cast<TlsData* const*>(
4663 reinterpret_cast<const uint8_t*>(fp) + sizeof(Frame) + ShadowStackSpace +
4664 FrameWithTls::callerTLSOffset());
4665 }
4666
ExtractCalleeTlsFromFrameWithTls(const Frame * fp)4667 const TlsData* ExtractCalleeTlsFromFrameWithTls(const Frame* fp) {
4668 return *reinterpret_cast<TlsData* const*>(
4669 reinterpret_cast<const uint8_t*>(fp) + sizeof(Frame) + ShadowStackSpace +
4670 FrameWithTls::calleeTLSOffset());
4671 }
4672 } // namespace wasm
4673
4674 } // namespace js
4675