1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/MacroAssembler-inl.h"
8
9 #include "mozilla/CheckedInt.h"
10 #include "mozilla/MathAlgorithms.h"
11
12 #include <algorithm>
13
14 #include "jsfriendapi.h"
15
16 #include "builtin/TypedObject.h"
17 #include "gc/GCProbes.h"
18 #include "jit/AtomicOp.h"
19 #include "jit/Bailouts.h"
20 #include "jit/BaselineFrame.h"
21 #include "jit/BaselineIC.h"
22 #include "jit/BaselineJIT.h"
23 #include "jit/JitOptions.h"
24 #include "jit/Lowering.h"
25 #include "jit/MIR.h"
26 #include "jit/MoveEmitter.h"
27 #include "jit/Simulator.h"
28 #include "js/Conversions.h"
29 #include "js/Printf.h"
30 #include "vm/ArrayBufferViewObject.h"
31 #include "vm/FunctionFlags.h" // js::FunctionFlags
32 #include "vm/TraceLogging.h"
33
34 #include "gc/Nursery-inl.h"
35 #include "jit/shared/Lowering-shared-inl.h"
36 #include "jit/TemplateObject-inl.h"
37 #include "vm/Interpreter-inl.h"
38 #include "vm/JSObject-inl.h"
39 #include "vm/TypeInference-inl.h"
40
41 using namespace js;
42 using namespace js::jit;
43
44 using JS::GenericNaN;
45 using JS::ToInt32;
46
47 using mozilla::CheckedUint32;
48
49 template <typename T>
EmitTypeCheck(MacroAssembler & masm,Assembler::Condition cond,const T & src,TypeSet::Type type,Label * label)50 static void EmitTypeCheck(MacroAssembler& masm, Assembler::Condition cond,
51 const T& src, TypeSet::Type type, Label* label) {
52 if (type.isAnyObject()) {
53 masm.branchTestObject(cond, src, label);
54 return;
55 }
56 switch (type.primitive()) {
57 case ValueType::Double:
58 // TI double type includes int32.
59 masm.branchTestNumber(cond, src, label);
60 break;
61 case ValueType::Int32:
62 masm.branchTestInt32(cond, src, label);
63 break;
64 case ValueType::Boolean:
65 masm.branchTestBoolean(cond, src, label);
66 break;
67 case ValueType::String:
68 masm.branchTestString(cond, src, label);
69 break;
70 case ValueType::Symbol:
71 masm.branchTestSymbol(cond, src, label);
72 break;
73 case ValueType::BigInt:
74 masm.branchTestBigInt(cond, src, label);
75 break;
76 case ValueType::Null:
77 masm.branchTestNull(cond, src, label);
78 break;
79 case ValueType::Undefined:
80 masm.branchTestUndefined(cond, src, label);
81 break;
82 case ValueType::Magic:
83 masm.branchTestMagic(cond, src, label);
84 break;
85 case ValueType::PrivateGCThing:
86 case ValueType::Object:
87 MOZ_CRASH("Unexpected type");
88 }
89 }
90
template <typename Source>
void MacroAssembler::guardTypeSet(const Source& address, const TypeSet* types,
                                  BarrierKind kind, Register unboxScratch,
                                  Register objScratch,
                                  Register spectreRegToZero, Label* miss) {
  // Emit a type barrier: fall through when the value at |address| matches
  // |types|, otherwise jump to |miss|.
  //
  // unboxScratch may be InvalidReg on 32-bit platforms. It should only be
  // used for extracting the Value tag or payload.
  //
  // objScratch may be InvalidReg if the TypeSet does not contain specific
  // objects to guard on. It should only be used for guardObjectType.
  //
  // spectreRegToZero is a register that will be zeroed by guardObjectType on
  // speculatively executed paths.

  MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
  MOZ_ASSERT(!types->unknown());

  Label matched;
  // Every primitive tag we may need to test for, plus AnyObject.
  TypeSet::Type tests[] = {TypeSet::Int32Type(), TypeSet::UndefinedType(),
                           TypeSet::BooleanType(), TypeSet::StringType(),
                           TypeSet::SymbolType(), TypeSet::BigIntType(),
                           TypeSet::NullType(), TypeSet::MagicArgType(),
                           TypeSet::AnyObjectType()};

  // The double type also implies Int32.
  // So replace the int32 test with the double one.
  if (types->hasType(TypeSet::DoubleType())) {
    MOZ_ASSERT(types->hasType(TypeSet::Int32Type()));
    tests[0] = TypeSet::DoubleType();
  }

  // Count the branches we will emit so the final test can jump straight to
  // |miss| on mismatch instead of falling through to another test.
  unsigned numBranches = 0;
  for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
    if (types->hasType(tests[i])) {
      numBranches++;
    }
  }

  if (!types->unknownObject() && types->getObjectCount() > 0) {
    numBranches++;
  }

  if (numBranches == 0) {
    // Empty type set: no value can ever match, so the barrier always fails.
    MOZ_ASSERT(types->empty());
    jump(miss);
    return;
  }

  Register tag = extractTag(address, unboxScratch);

  // Emit all typed tests. Every branch but the last jumps to |matched| on
  // success; the last one jumps to |miss| on failure instead.
  for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
    if (!types->hasType(tests[i])) {
      continue;
    }

    if (--numBranches > 0) {
      EmitTypeCheck(*this, Equal, tag, tests[i], &matched);
    } else {
      EmitTypeCheck(*this, NotEqual, tag, tests[i], miss);
    }
  }

  // If we don't have specific objects to check for, we're done.
  if (numBranches == 0) {
    MOZ_ASSERT(types->unknownObject() || types->getObjectCount() == 0);
    bind(&matched);
    return;
  }

  // Test specific objects.
  MOZ_ASSERT(objScratch != InvalidReg);
  MOZ_ASSERT(objScratch != unboxScratch);

  MOZ_ASSERT(numBranches == 1);
  branchTestObject(NotEqual, tag, miss);

  if (kind != BarrierKind::TypeTagOnly) {
    Register obj = extractObject(address, unboxScratch);
    guardObjectType(obj, types, objScratch, spectreRegToZero, miss);
  } else {
#ifdef DEBUG
    // The tag-only barrier is expected to always succeed for objects. Verify
    // that in debug builds, tolerating type sets that might be incomplete
    // after a group change (see guardTypeSetMightBeIncomplete).
    Label fail;
    Register obj = extractObject(address, unboxScratch);
    guardObjectType(obj, types, objScratch, spectreRegToZero, &fail);
    jump(&matched);

    bind(&fail);
    guardTypeSetMightBeIncomplete(types, obj, objScratch, &matched);
    assumeUnreachable("Unexpected object type");
#endif
  }

  bind(&matched);
}
186
template <>
void MacroAssembler::guardTypeSet(const TypedOrValueRegister& reg,
                                  const TypeSet* types, BarrierKind kind,
                                  Register unboxScratch, Register objScratch,
                                  Register spectreRegToZero, Label* miss) {
  // See guardTypeSet comments above. This is a specialization for
  // TypedOrValueRegister: when the register's MIRType is statically known,
  // most barriers can be resolved at compile time.

  MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
  MOZ_ASSERT(!types->unknown());

  if (reg.hasValue()) {
    // Boxed value: defer to the generic implementation.
    guardTypeSet(reg.valueReg(), types, kind, unboxScratch, objScratch,
                 spectreRegToZero, miss);
    return;
  }

  MIRType valType = reg.type();
  MOZ_ASSERT(valType != MIRType::Value);

  if (valType != MIRType::Object) {
    // Barrier always either succeeds or fails.
    if (!types->hasType(TypeSet::PrimitiveType(valType))) {
      jump(miss);
    }
    return;
  }

  if (types->unknownObject()) {
    // Barrier always succeeds.
    return;
  }

  if (types->getObjectCount() == 0) {
    // Barrier always fails.
    jump(miss);
    return;
  }

  if (kind == BarrierKind::TypeTagOnly) {
    // Barrier always succeeds. Assert the type matches in DEBUG builds.
#ifdef DEBUG
    Label fail, matched;
    Register obj = reg.typedReg().gpr();
    guardObjectType(obj, types, objScratch, spectreRegToZero, &fail);
    jump(&matched);

    bind(&fail);
    guardTypeSetMightBeIncomplete(types, obj, objScratch, &matched);
    assumeUnreachable("Unexpected object type");

    bind(&matched);
#endif
    return;
  }

  MOZ_ASSERT(kind == BarrierKind::TypeSet);
  MOZ_ASSERT(objScratch != InvalidReg);

  // Test specific objects.
  Register obj = reg.typedReg().gpr();
  guardObjectType(obj, types, objScratch, spectreRegToZero, miss);
}
250
251 #ifdef DEBUG
252 // guardTypeSetMightBeIncomplete is only used in DEBUG builds. If this ever
253 // changes, we need to make sure it's Spectre-safe.
// Jump to |label| if the type set guard could legitimately have missed for
// |obj| because the set is stale; used only by DEBUG assertions above.
void MacroAssembler::guardTypeSetMightBeIncomplete(const TypeSet* types,
                                                   Register obj,
                                                   Register scratch,
                                                   Label* label) {
  // Type set guards might miss when an object's group changes. In this case
  // either its old group's properties will become unknown, or it will change
  // to a native object with an original unboxed group. Jump to label if this
  // might have happened for the input object.

  if (types->unknownObject()) {
    jump(label);
    return;
  }

  for (size_t i = 0; i < types->getObjectCount(); i++) {
    if (JSObject* singleton = getSingletonAndDelayBarrier(types, i)) {
      // For singletons, check the flags of the object's current group.
      movePtr(ImmGCPtr(singleton), scratch);
      loadPtr(Address(scratch, JSObject::offsetOfGroup()), scratch);
    } else if (ObjectGroup* group = getGroupAndDelayBarrier(types, i)) {
      movePtr(ImmGCPtr(group), scratch);
    } else {
      continue;
    }
    // A set OBJECT_FLAG_UNKNOWN_PROPERTIES flag means the group changed.
    branchTest32(Assembler::NonZero,
                 Address(scratch, ObjectGroup::offsetOfFlags()),
                 Imm32(OBJECT_FLAG_UNKNOWN_PROPERTIES), label);
  }
}
282 #endif
283
// Branch to |miss| unless |obj| is one of the singletons or has one of the
// object groups listed in |types|; on a match, fall through.
void MacroAssembler::guardObjectType(Register obj, const TypeSet* types,
                                     Register scratch,
                                     Register spectreRegToZero, Label* miss) {
  MOZ_ASSERT(obj != scratch);
  MOZ_ASSERT(!types->unknown());
  MOZ_ASSERT(!types->hasType(TypeSet::AnyObjectType()));
  MOZ_ASSERT_IF(types->getObjectCount() > 0, scratch != InvalidReg);

  // Note: this method elides read barriers on values read from type sets, as
  // this may be called off thread during Ion compilation. This is
  // safe to do as the final JitCode object will be allocated during the
  // incremental GC (or the compilation canceled before we start sweeping),
  // see CodeGenerator::link. Other callers should use TypeSet::readBarrier
  // to trigger the barrier on the contents of type sets passed in here.
  Label matched;

  // First pass over the set: count the branches to emit so the last
  // comparison can jump to |miss| directly instead of falling through.
  bool hasSingletons = false;
  bool hasObjectGroups = false;
  unsigned numBranches = 0;

  unsigned count = types->getObjectCount();
  for (unsigned i = 0; i < count; i++) {
    if (types->hasGroup(i)) {
      hasObjectGroups = true;
      numBranches++;
    } else if (types->hasSingleton(i)) {
      hasSingletons = true;
      numBranches++;
    }
  }

  if (numBranches == 0) {
    // Nothing to match against: the guard always fails.
    jump(miss);
    return;
  }

  if (JitOptions.spectreObjectMitigationsBarriers) {
    // Keep a zero in |scratch|; spectreMovePtr below moves it into
    // |spectreRegToZero| on the not-equal condition so speculative
    // execution past a mispredicted branch sees a zeroed register.
    move32(Imm32(0), scratch);
  }

  if (hasSingletons) {
    for (unsigned i = 0; i < count; i++) {
      JSObject* singleton = getSingletonAndDelayBarrier(types, i);
      if (!singleton) {
        continue;
      }

      if (JitOptions.spectreObjectMitigationsBarriers) {
        if (--numBranches > 0) {
          Label next;
          branchPtr(NotEqual, obj, ImmGCPtr(singleton), &next);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
          jump(&matched);
          bind(&next);
        } else {
          branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
        }
      } else {
        if (--numBranches > 0) {
          branchPtr(Equal, obj, ImmGCPtr(singleton), &matched);
        } else {
          branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
        }
      }
    }
  }

  if (hasObjectGroups) {
    comment("has object groups");

    // If Spectre mitigations are enabled, we use the scratch register as
    // zero register. Without mitigations we can use it to store the group.
    Address groupAddr(obj, JSObject::offsetOfGroup());
    if (!JitOptions.spectreObjectMitigationsBarriers) {
      loadPtr(groupAddr, scratch);
    }

    for (unsigned i = 0; i < count; i++) {
      ObjectGroup* group = getGroupAndDelayBarrier(types, i);
      if (!group) {
        continue;
      }

      // Record the group so the delayed read barrier can be performed
      // later; on OOM, mark the assembler and give up.
      if (!pendingObjectGroupReadBarriers_.append(group)) {
        setOOM();
        return;
      }

      if (JitOptions.spectreObjectMitigationsBarriers) {
        if (--numBranches > 0) {
          Label next;
          branchPtr(NotEqual, groupAddr, ImmGCPtr(group), &next);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
          jump(&matched);
          bind(&next);
        } else {
          branchPtr(NotEqual, groupAddr, ImmGCPtr(group), miss);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
        }
      } else {
        if (--numBranches > 0) {
          branchPtr(Equal, scratch, ImmGCPtr(group), &matched);
        } else {
          branchPtr(NotEqual, scratch, ImmGCPtr(group), miss);
        }
      }
    }
  }

  MOZ_ASSERT(numBranches == 0);

  bind(&matched);
}
398
// Explicit instantiations of guardTypeSet for the value-in-memory (Address)
// and boxed-register (ValueOperand) cases used by the JIT backends.
template void MacroAssembler::guardTypeSet(
    const Address& address, const TypeSet* types, BarrierKind kind,
    Register unboxScratch, Register objScratch, Register spectreRegToZero,
    Label* miss);
template void MacroAssembler::guardTypeSet(
    const ValueOperand& value, const TypeSet* types, BarrierKind kind,
    Register unboxScratch, Register objScratch, Register spectreRegToZero,
    Label* miss);
407
408 template <typename S, typename T>
StoreToTypedFloatArray(MacroAssembler & masm,int arrayType,const S & value,const T & dest)409 static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
410 const S& value, const T& dest) {
411 switch (arrayType) {
412 case Scalar::Float32:
413 masm.storeFloat32(value, dest);
414 break;
415 case Scalar::Float64:
416 masm.storeDouble(value, dest);
417 break;
418 default:
419 MOZ_CRASH("Invalid typed array type");
420 }
421 }
422
// Store |value| into a Float32/Float64 typed array element addressed by a
// BaseIndex; forwards to the shared StoreToTypedFloatArray helper above.
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const BaseIndex& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}
// Store |value| into a Float32/Float64 typed array element at a fixed
// Address; forwards to the shared StoreToTypedFloatArray helper above.
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const Address& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}
433
// Shared helper for the storeToTypedBigIntArray methods below: BigInt64 and
// BigUint64 elements are both stored as raw 64-bit values.
template <typename S, typename T>
static void StoreToTypedBigIntArray(MacroAssembler& masm,
                                    Scalar::Type arrayType, const S& value,
                                    const T& dest) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));
  masm.store64(value, dest);
}
441
// Store a 64-bit BigInt element at a BaseIndex-addressed slot; forwards to
// the shared StoreToTypedBigIntArray helper above.
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const BaseIndex& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
// Store a 64-bit BigInt element at a fixed Address; forwards to the shared
// StoreToTypedBigIntArray helper above.
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const Address& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
452
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        AnyRegister dest, Register temp,
                                        Label* fail) {
  // Load a typed array element into |dest|, sign/zero-extending or
  // converting as required by |arrayType|. |temp| is only used for Uint32
  // loads into a float register; |fail| is only taken for Uint32 loads into
  // a GPR whose value does not fit in a signed int32.
  switch (arrayType) {
    case Scalar::Int8:
      load8SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
      load8ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int16:
      load16SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint16:
      load16ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int32:
      load32(src, dest.gpr());
      break;
    case Scalar::Uint32:
      if (dest.isFloat()) {
        // The full uint32 range is representable as a double.
        load32(src, temp);
        convertUInt32ToDouble(temp, dest.fpu());
      } else {
        load32(src, dest.gpr());

        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadUnboxedScalar to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
      }
      break;
    case Scalar::Float32:
      loadFloat32(src, dest.fpu());
      canonicalizeFloat(dest.fpu());
      break;
    case Scalar::Float64:
      loadDouble(src, dest.fpu());
      canonicalizeDouble(dest.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      // BigInt loads go through loadFromTypedBigIntArray instead.
      MOZ_CRASH("Invalid typed array type");
  }
}
501
// Explicit instantiations for the two addressing modes used by the JITs.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);
510
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        const ValueOperand& dest,
                                        bool allowDouble, Register temp,
                                        Label* fail) {
  // Load a typed array element and box it as a Value in |dest|. When
  // |allowDouble| is false, Uint32 values above INT32_MAX jump to |fail|
  // instead of being boxed as doubles.
  switch (arrayType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      // These always fit in an int32, so the unboxed load cannot fail.
      loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
                         InvalidReg, nullptr);
      tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
      break;
    case Scalar::Uint32:
      // Don't clobber dest when we could fail, instead use temp.
      load32(src, temp);
      if (allowDouble) {
        // If the value fits in an int32, store an int32 type tag.
        // Else, convert the value to double and box it.
        Label done, isDouble;
        branchTest32(Assembler::Signed, temp, temp, &isDouble);
        {
          tagValue(JSVAL_TYPE_INT32, temp, dest);
          jump(&done);
        }
        bind(&isDouble);
        {
          ScratchDoubleScope fpscratch(*this);
          convertUInt32ToDouble(temp, fpscratch);
          boxDouble(fpscratch, dest, fpscratch);
        }
        bind(&done);
      } else {
        // Bailout if the value does not fit in an int32.
        branchTest32(Assembler::Signed, temp, temp, fail);
        tagValue(JSVAL_TYPE_INT32, temp, dest);
      }
      break;
    case Scalar::Float32: {
      // Widen to double before boxing; Values only store doubles.
      ScratchDoubleScope dscratch(*this);
      FloatRegister fscratch = dscratch.asSingle();
      loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
                         dest.scratchReg(), nullptr);
      convertFloat32ToDouble(fscratch, dscratch);
      boxDouble(dscratch, dest, dscratch);
      break;
    }
    case Scalar::Float64: {
      ScratchDoubleScope fpscratch(*this);
      loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
                         dest.scratchReg(), nullptr);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      // BigInt loads go through loadFromTypedBigIntArray instead.
      MOZ_CRASH("Invalid typed array type");
  }
}
574
// Explicit instantiations for the two addressing modes used by the JITs.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 const ValueOperand& dest,
                                                 bool allowDouble,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 const ValueOperand& dest,
                                                 bool allowDouble,
                                                 Register temp, Label* fail);
585
template <typename T>
void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                              const T& src, Register bigInt,
                                              Register64 temp) {
  // Load a 64-bit element from a BigInt64/BigUint64 typed array and
  // initialize the (already allocated) BigInt in |bigInt| with that value.
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));

  load64(src, temp);
  initializeBigInt64(arrayType, bigInt, temp);
}
595
// Explicit instantiations for the two addressing modes used by the JITs.
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const Address& src,
                                                       Register bigInt,
                                                       Register64 temp);
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const BaseIndex& src,
                                                       Register bigInt,
                                                       Register64 temp);
604
// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
// |fail| receives the jump whenever the inline allocation path must not run.
void MacroAssembler::checkAllocatorState(Label* fail) {
  // Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
  jump(fail);
#endif

#ifdef JS_GC_ZEAL
  // Don't execute the inline path if gc zeal or tracing are active.
  const uint32_t* ptrZealModeBits =
      GetJitContext()->runtime->addressOfGCZealModeBits();
  branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
           fail);
#endif

  // Don't execute the inline path if the realm has an object metadata callback,
  // as the metadata to use for the object may vary between executions of the
  // op.
  if (GetJitContext()->realm()->hasAllocationMetadataBuilder()) {
    jump(fail);
  }
}
628
shouldNurseryAllocate(gc::AllocKind allocKind,gc::InitialHeap initialHeap)629 bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
630 gc::InitialHeap initialHeap) {
631 // Note that Ion elides barriers on writes to objects known to be in the
632 // nursery, so any allocation that can be made into the nursery must be made
633 // into the nursery, even if the nursery is disabled. At runtime these will
634 // take the out-of-line path, which is required to insert a barrier for the
635 // initializing writes.
636 return IsNurseryAllocable(allocKind) && initialHeap != gc::TenuredHeap;
637 }
638
639 // Inline version of Nursery::allocateObject. If the object has dynamic slots,
640 // this fills in the slots_ pointer.
nurseryAllocateObject(Register result,Register temp,gc::AllocKind allocKind,size_t nDynamicSlots,Label * fail)641 void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
642 gc::AllocKind allocKind,
643 size_t nDynamicSlots, Label* fail) {
644 MOZ_ASSERT(IsNurseryAllocable(allocKind));
645
646 // We still need to allocate in the nursery, per the comment in
647 // shouldNurseryAllocate; however, we need to insert into the
648 // mallocedBuffers set, so bail to do the nursery allocation in the
649 // interpreter.
650 if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
651 jump(fail);
652 return;
653 }
654
655 // No explicit check for nursery.isEnabled() is needed, as the comparison
656 // with the nursery's end will always fail in such cases.
657 CompileZone* zone = GetJitContext()->realm()->zone();
658 size_t thingSize = gc::Arena::thingSize(allocKind);
659 size_t totalSize = thingSize + nDynamicSlots * sizeof(HeapSlot);
660 MOZ_ASSERT(totalSize < INT32_MAX);
661 MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
662
663 bumpPointerAllocate(
664 result, temp, fail, zone, zone->addressOfNurseryPosition(),
665 zone->addressOfNurseryCurrentEnd(), JS::TraceKind::Object, totalSize);
666
667 if (nDynamicSlots) {
668 computeEffectiveAddress(Address(result, thingSize), temp);
669 storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
670 }
671 }
672
// Inlined version of FreeSpan::allocate. This does not fill in slots_.
void MacroAssembler::freeListAllocate(Register result, Register temp,
                                      gc::AllocKind allocKind, Label* fail) {
  CompileZone* zone = GetJitContext()->realm()->zone();
  int thingSize = int(gc::Arena::thingSize(allocKind));

  Label fallback;
  Label success;

  // Load the first and last offsets of |zone|'s free list for |allocKind|.
  // If there is no room remaining in the span, fall back to get the next one.
  gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
  branch32(Assembler::AboveOrEqual, result, temp, &fallback);

  // Bump the offset for the next allocation.
  add32(Imm32(thingSize), result);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  sub32(Imm32(thingSize), result);
  addPtr(temp, result);  // Turn the offset into a pointer.
  jump(&success);

  bind(&fallback);
  // If there are no free spans left, we bail to finish the allocation. The
  // interpreter will call the GC allocator to set up a new arena to allocate
  // from, after which we can resume allocating in the jit.
  branchTest32(Assembler::Zero, result, result, fail);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  addPtr(temp, result);  // Turn the offset into a pointer.
  Push(result);
  // Update the free list to point to the next span (which may be empty).
  load32(Address(result, 0), result);
  store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  Pop(result);

  bind(&success);

  // Keep the profiler's tenured allocation counter in sync.
  if (GetJitContext()->runtime->geckoProfiler().enabled()) {
    uint32_t* countAddress =
        GetJitContext()->runtime->addressOfTenuredAllocCount();
    movePtr(ImmPtr(countAddress), temp);
    add32(Imm32(1), Address(temp, 0));
  }
}
720
// Call the jit runtime's free stub on the buffer pointed to by |slots|.
// CallTempReg0 is saved and restored around the call.
void MacroAssembler::callFreeStub(Register slots) {
  // This register must match the one in JitRuntime::generateFreeStub.
  const Register regSlots = CallTempReg0;

  push(regSlots);
  movePtr(slots, regSlots);
  call(GetJitContext()->runtime->jitRuntime()->freeStub());
  pop(regSlots);
}
730
731 // Inlined equivalent of gc::AllocateObject, without failure case handling.
allocateObject(Register result,Register temp,gc::AllocKind allocKind,uint32_t nDynamicSlots,gc::InitialHeap initialHeap,Label * fail)732 void MacroAssembler::allocateObject(Register result, Register temp,
733 gc::AllocKind allocKind,
734 uint32_t nDynamicSlots,
735 gc::InitialHeap initialHeap, Label* fail) {
736 MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
737
738 checkAllocatorState(fail);
739
740 if (shouldNurseryAllocate(allocKind, initialHeap)) {
741 MOZ_ASSERT(initialHeap == gc::DefaultHeap);
742 return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail);
743 }
744
745 // Fall back to calling into the VM to allocate objects in the tenured heap
746 // that have dynamic slots.
747 if (nDynamicSlots) {
748 jump(fail);
749 return;
750 }
751
752 return freeListAllocate(result, temp, allocKind, fail);
753 }
754
// Allocate and initialize an object shaped like |templateObj|, jumping to
// |fail| when inline allocation is not possible.
void MacroAssembler::createGCObject(Register obj, Register temp,
                                    const TemplateObject& templateObj,
                                    gc::InitialHeap initialHeap, Label* fail,
                                    bool initContents) {
  gc::AllocKind allocKind = templateObj.getAllocKind();
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  uint32_t nDynamicSlots = 0;
  if (templateObj.isNative()) {
    const NativeTemplateObject& ntemplate =
        templateObj.asNativeTemplateObject();
    nDynamicSlots = ntemplate.numDynamicSlots();

    // Arrays with copy on write elements do not need fixed space for an
    // elements header. The template object, which owns the original
    // elements, might have another allocation kind.
    if (ntemplate.denseElementsAreCopyOnWrite()) {
      allocKind = gc::AllocKind::OBJECT0_BACKGROUND;
    }
  }

  allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
  initGCThing(obj, temp, templateObj, initContents);
}
779
// Inlined equivalent of gc::AllocateNonObject, without failure case handling.
// Non-object allocation does not need to worry about slots, so can take a
// simpler path.
void MacroAssembler::allocateNonObject(Register result, Register temp,
                                       gc::AllocKind allocKind, Label* fail) {
  checkAllocatorState(fail);
  freeListAllocate(result, temp, allocKind, fail);
}
788
// Inline version of Nursery::allocateString. Bump-allocates from the
// zone's string nursery; jumps to |fail| when the nursery is full.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = GetJitContext()->realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);

  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfStringNurseryPosition(),
                      zone->addressOfStringNurseryCurrentEnd(),
                      JS::TraceKind::String, thingSize);
}
806
// Inline version of Nursery::allocateBigInt. Bump-allocates from the
// zone's BigInt nursery; jumps to |fail| when the nursery is full.
void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = GetJitContext()->realm()->zone();
  size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);

  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfBigIntNurseryPosition(),
                      zone->addressOfBigIntNurseryCurrentEnd(),
                      JS::TraceKind::BigInt, thingSize);
}
823
// Generic nursery bump allocation used for objects, strings and BigInts.
// |posAddr| and |curEndAddr| are the addresses of the nursery's current
// position and end pointers; on success |result| points at the new cell,
// just past its nursery cell header.
void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
                                         Label* fail, CompileZone* zone,
                                         void* posAddr, const void* curEndAddr,
                                         JS::TraceKind traceKind,
                                         uint32_t size) {
  uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  // The position (allocation pointer) and the end pointer are stored
  // very close to each other -- specifically, easily within a 32 bit offset.
  // Use relative offsets between them, to avoid 64-bit immediate loads.
  //
  // I tried to optimise this further by using an extra register to avoid
  // the final subtraction and hopefully get some more instruction
  // parallelism, but it made no difference.
  movePtr(ImmPtr(posAddr), temp);
  loadPtr(Address(temp, 0), result);
  addPtr(Imm32(totalSize), result);
  CheckedInt<int32_t> endOffset =
      (CheckedInt<uintptr_t>(uintptr_t(curEndAddr)) -
       CheckedInt<uintptr_t>(uintptr_t(posAddr)))
          .toChecked<int32_t>();
  MOZ_ASSERT(endOffset.isValid(), "Position and end pointers must be nearby");
  branchPtr(Assembler::Below, Address(temp, endOffset.value()), result, fail);
  storePtr(result, Address(temp, 0));
  subPtr(Imm32(size), result);
  // Write the cell header immediately before the cell itself.
  storePtr(ImmWord(zone->nurseryCellHeader(traceKind)),
           Address(result, -js::Nursery::nurseryCellHeaderSize()));

  if (GetJitContext()->runtime->geckoProfiler().enabled()) {
    // Bump the profiler's nursery allocation counter, using a
    // position-relative address when the counter is close enough to avoid
    // clobbering |temp| with another 64-bit immediate.
    uint32_t* countAddress = zone->addressOfNurseryAllocCount();
    CheckedInt<int32_t> counterOffset =
        (CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
         CheckedInt<uintptr_t>(uintptr_t(posAddr)))
            .toChecked<int32_t>();
    if (counterOffset.isValid()) {
      add32(Imm32(1), Address(temp, counterOffset.value()));
    } else {
      movePtr(ImmPtr(countAddress), temp);
      add32(Imm32(1), Address(temp, 0));
    }
  }
}
868
// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
// allocation requested but unsuccessful.
void MacroAssembler::allocateString(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    gc::InitialHeap initialHeap, Label* fail) {
  // Only the two string alloc kinds are supported here.
  MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
             allocKind == gc::AllocKind::FAT_INLINE_STRING);

  checkAllocatorState(fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    return nurseryAllocateString(result, temp, allocKind, fail);
  }

  freeListAllocate(result, temp, allocKind, fail);
}
886
newGCString(Register result,Register temp,Label * fail,bool attemptNursery)887 void MacroAssembler::newGCString(Register result, Register temp, Label* fail,
888 bool attemptNursery) {
889 allocateString(result, temp, js::gc::AllocKind::STRING,
890 attemptNursery ? gc::DefaultHeap : gc::TenuredHeap, fail);
891 }
892
newGCFatInlineString(Register result,Register temp,Label * fail,bool attemptNursery)893 void MacroAssembler::newGCFatInlineString(Register result, Register temp,
894 Label* fail, bool attemptNursery) {
895 allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
896 attemptNursery ? gc::DefaultHeap : gc::TenuredHeap, fail);
897 }
898
newGCBigInt(Register result,Register temp,Label * fail,bool attemptNursery)899 void MacroAssembler::newGCBigInt(Register result, Register temp, Label* fail,
900 bool attemptNursery) {
901 checkAllocatorState(fail);
902
903 gc::InitialHeap initialHeap =
904 attemptNursery ? gc::DefaultHeap : gc::TenuredHeap;
905 if (shouldNurseryAllocate(gc::AllocKind::BIGINT, initialHeap)) {
906 MOZ_ASSERT(initialHeap == gc::DefaultHeap);
907 return nurseryAllocateBigInt(result, temp, fail);
908 }
909
910 freeListAllocate(result, temp, gc::AllocKind::BIGINT, fail);
911 }
912
copySlotsFromTemplate(Register obj,const NativeTemplateObject & templateObj,uint32_t start,uint32_t end)913 void MacroAssembler::copySlotsFromTemplate(
914 Register obj, const NativeTemplateObject& templateObj, uint32_t start,
915 uint32_t end) {
916 uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
917 for (unsigned i = start; i < nfixed; i++) {
918 // Template objects are not exposed to script and therefore immutable.
919 // However, regexp template objects are sometimes used directly (when
920 // the cloning is not observable), and therefore we can end up with a
921 // non-zero lastIndex. Detect this case here and just substitute 0, to
922 // avoid racing with the main thread updating this slot.
923 Value v;
924 if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
925 v = Int32Value(0);
926 } else {
927 v = templateObj.getSlot(i);
928 }
929 storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
930 }
931 }
932
// Store |v| into the value slots [start, end) located at |base|, using |temp|
// as scratch. |v| must be undefined or the uninitialized-lexical magic value
// (asserted below), so no post-barriers are needed for these stores.
void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
                                                uint32_t start, uint32_t end,
                                                const Value& v) {
  MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));

  // Nothing to do for an empty range.
  if (start >= end) {
    return;
  }

#ifdef JS_NUNBOX32
  // We only have a single spare register, so do the initialization as two
  // strided writes of the tag and body.
  Address addr = base;
  move32(Imm32(v.toNunboxPayload()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue)) {
    store32(temp, ToPayload(addr));
  }

  addr = base;
  move32(Imm32(v.toNunboxTag()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue)) {
    store32(temp, ToType(addr));
  }
#else
  // On 64-bit we can materialize the boxed value once and store it whole.
  moveValue(v, ValueOperand(temp));
  for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtrValue)) {
    storePtr(temp, base);
  }
#endif
}
963
fillSlotsWithUndefined(Address base,Register temp,uint32_t start,uint32_t end)964 void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
965 uint32_t start, uint32_t end) {
966 fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
967 }
968
fillSlotsWithUninitialized(Address base,Register temp,uint32_t start,uint32_t end)969 void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
970 uint32_t start, uint32_t end) {
971 fillSlotsWithConstantValue(base, temp, start, end,
972 MagicValue(JS_UNINITIALIZED_LEXICAL));
973 }
974
// Scan the template object's slots backwards to find where its trailing runs
// of uninitialized-lexical and undefined values begin.
//
// On return:
//   *startOfUndefined     - index of the first slot in the trailing run of
//                           UndefinedValue slots (== nslots if there is none).
//   *startOfUninitialized - index of the first slot in the run of
//                           uninitialized-lexical slots immediately preceding
//                           the undefined run (== *startOfUndefined if none).
static void FindStartOfUninitializedAndUndefinedSlots(
    const NativeTemplateObject& templateObj, uint32_t nslots,
    uint32_t* startOfUninitialized, uint32_t* startOfUndefined) {
  MOZ_ASSERT(nslots == templateObj.slotSpan());
  MOZ_ASSERT(nslots > 0);

  // Walk backwards over the trailing undefined slots.
  uint32_t first = nslots;
  for (; first != 0; --first) {
    if (templateObj.getSlot(first - 1) != UndefinedValue()) {
      break;
    }
  }
  *startOfUndefined = first;

  // Uninitialized-lexical (TDZ) slots may precede the undefined run; keep
  // walking backwards over them if present.
  if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
    for (; first != 0; --first) {
      if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
        break;
      }
    }
    *startOfUninitialized = first;
  } else {
    *startOfUninitialized = *startOfUndefined;
  }
}
1000
// ABI-callable helper: allocate a zeroed data buffer for a typed array with
// |count| elements and attach it to |obj|. On failure (or invalid |count|)
// the object's private pointer stays null; the jitcode caller tests that to
// take its fail path.
static void AllocateAndInitTypedArrayBuffer(JSContext* cx,
                                            TypedArrayObject* obj,
                                            int32_t count) {
  AutoUnsafeCallWithABI unsafe;

  // Null private marks "no buffer yet" for the caller's failure check.
  obj->initPrivate(nullptr);

  // Negative numbers or zero will bail out to the slow path, which in turn will
  // raise an invalid argument exception or create a correct object with zero
  // elements.
  if (count <= 0 || uint32_t(count) >= INT32_MAX / obj->bytesPerElement()) {
    obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(0));
    return;
  }

  obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(count));

  size_t nbytes = count * obj->bytesPerElement();
  MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
             "RoundUp must not overflow");

  // Round the allocation up to Value alignment.
  nbytes = RoundUp(nbytes, sizeof(Value));
  void* buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
                                                 js::ArrayBufferContentsArena);
  if (buf) {
    InitObjectPrivate(obj, buf, nbytes, MemoryUse::TypedArrayElements);
  }
}
1029
// Initialize the data storage of a freshly allocated typed array |obj|:
// either point the data slot at zeroed inline storage (small, fixed-length
// arrays) or call out to AllocateAndInitTypedArrayBuffer for a heap buffer,
// jumping to |fail| if that allocation failed.
void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
                                         Register lengthReg,
                                         LiveRegisterSet liveRegs, Label* fail,
                                         TypedArrayObject* templateObj,
                                         TypedArrayLength lengthKind) {
  MOZ_ASSERT(templateObj->hasPrivate());
  MOZ_ASSERT(!templateObj->hasBuffer());

  constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
  constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);

  static_assert(
      TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
      "fixed inline element data assumed to begin after the data slot");

  static_assert(
      TypedArrayObject::INLINE_BUFFER_LIMIT ==
          JSObject::MAX_BYTE_SIZE - dataOffset,
      "typed array inline buffer is limited by the maximum object byte size");

  // Initialise data elements to zero.
  int32_t length = templateObj->length();
  size_t nbytes = length * templateObj->bytesPerElement();

  if (lengthKind == TypedArrayLength::Fixed &&
      nbytes <= TypedArrayObject::INLINE_BUFFER_LIMIT) {
    MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());

    // Store data elements inside the remaining JSObject slots.
    computeEffectiveAddress(Address(obj, dataOffset), temp);
    storePtr(temp, Address(obj, dataSlotOffset));

    // Write enough zero pointers into fixed data to zero every
    // element. (This zeroes past the end of a byte count that's
    // not a multiple of pointer size. That's okay, because fixed
    // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
    // and we won't inline unless the desired memory fits in that
    // space.)
    static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");

    size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
    for (size_t i = 0; i < numZeroPointers; i++) {
      storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
    }
#ifdef DEBUG
    // Tag zero-length arrays so debug assertions can recognize them.
    if (nbytes == 0) {
      store8(Imm32(TypedArrayObject::ZeroLengthArrayData),
             Address(obj, dataSlotOffset));
    }
#endif
  } else {
    if (lengthKind == TypedArrayLength::Fixed) {
      move32(Imm32(length), lengthReg);
    }

    // Allocate a buffer on the heap to store the data elements.
    liveRegs.addUnchecked(temp);
    liveRegs.addUnchecked(obj);
    liveRegs.addUnchecked(lengthReg);
    PushRegsInMask(liveRegs);
    setupUnalignedABICall(temp);
    loadJSContext(temp);
    passABIArg(temp);
    passABIArg(obj);
    passABIArg(lengthReg);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, AllocateAndInitTypedArrayBuffer));
    PopRegsInMask(liveRegs);

    // Fail when data elements is set to NULL.
    branchPtr(Assembler::Equal, Address(obj, dataSlotOffset), ImmWord(0), fail);
  }
}
1102
// Initialize the fixed and dynamic slots of a freshly allocated native object
// |obj| from its template object, using |temp| as scratch. Reserved slots are
// copied from the template; the remainder is filled with uninitialized-lexical
// magic (CallObjects with TDZ'd params) and/or undefined.
void MacroAssembler::initGCSlots(Register obj, Register temp,
                                 const NativeTemplateObject& templateObj,
                                 bool initContents) {
  // Slots of non-array objects are required to be initialized.
  // Use the values currently in the template object.
  uint32_t nslots = templateObj.slotSpan();
  if (nslots == 0) {
    return;
  }

  uint32_t nfixed = templateObj.numUsedFixedSlots();
  uint32_t ndynamic = templateObj.numDynamicSlots();

  // Attempt to group slot writes such that we minimize the amount of
  // duplicated data we need to embed in code and load into registers. In
  // general, most template object slots will be undefined except for any
  // reserved slots. Since reserved slots come first, we split the object
  // logically into independent non-UndefinedValue writes to the head and
  // duplicated writes of UndefinedValue to the tail. For the majority of
  // objects, the "tail" will be the entire slot range.
  //
  // The template object may be a CallObject, in which case we need to
  // account for uninitialized lexical slots as well as undefined
  // slots. Unitialized lexical slots appears in CallObjects if the function
  // has parameter expressions, in which case closed over parameters have
  // TDZ. Uninitialized slots come before undefined slots in CallObjects.
  uint32_t startOfUninitialized = nslots;
  uint32_t startOfUndefined = nslots;
  FindStartOfUninitializedAndUndefinedSlots(
      templateObj, nslots, &startOfUninitialized, &startOfUndefined);
  MOZ_ASSERT(startOfUninitialized <= nfixed);  // Reserved slots must be fixed.
  MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
  MOZ_ASSERT_IF(!templateObj.isCallObject(),
                startOfUninitialized == startOfUndefined);

  // Copy over any preserved reserved slots.
  copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);

  // Fill the rest of the fixed slots with undefined and uninitialized.
  if (initContents) {
    size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
    fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
                               std::min(startOfUndefined, nfixed));

    offset = NativeObject::getFixedSlotOffset(startOfUndefined);
    fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
                           nfixed);
  }

  if (ndynamic) {
    // We are short one register to do this elegantly. Borrow the obj
    // register briefly for our slots base address.
    push(obj);
    loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);

    // Fill uninitialized slots if necessary. Otherwise initialize all
    // slots to undefined.
    if (startOfUndefined > nfixed) {
      // The uninitialized run spills into the dynamic slots.
      MOZ_ASSERT(startOfUninitialized != startOfUndefined);
      fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
                                 startOfUndefined - nfixed);
      size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
      fillSlotsWithUndefined(Address(obj, offset), temp,
                             startOfUndefined - nfixed, ndynamic);
    } else {
      fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
    }

    pop(obj);
  }
}
1174
#ifdef JS_GC_PROBES
// ABI-callable shim letting jitcode notify the GC probes of a new object.
static void TraceCreateObject(JSObject* obj) {
  AutoUnsafeCallWithABI unsafe;
  js::gc::gcprobes::CreateObject(obj);
}
#endif
1181
// Fast initialization of an empty object returned by allocateObject(): write
// the group/shape headers and initialize slots/elements/private data as
// dictated by the template object.
void MacroAssembler::initGCThing(Register obj, Register temp,
                                 const TemplateObject& templateObj,
                                 bool initContents) {
  // Fast initialization of an empty object returned by allocateObject().

  storePtr(ImmGCPtr(templateObj.group()),
           Address(obj, JSObject::offsetOfGroup()));

  storePtr(ImmGCPtr(templateObj.shape()),
           Address(obj, JSObject::offsetOfShape()));

  if (templateObj.isNative()) {
    const NativeTemplateObject& ntemplate =
        templateObj.asNativeTemplateObject();
    MOZ_ASSERT_IF(!ntemplate.denseElementsAreCopyOnWrite(),
                  !ntemplate.hasDynamicElements());
    MOZ_ASSERT_IF(ntemplate.convertDoubleElements(), ntemplate.isArrayObject());

    // If the object has dynamic slots, the slots member has already been
    // filled in.
    if (!ntemplate.hasDynamicSlots()) {
      storePtr(ImmPtr(nullptr), Address(obj, NativeObject::offsetOfSlots()));
    }

    if (ntemplate.denseElementsAreCopyOnWrite()) {
      // Copy-on-write arrays share the template's elements directly.
      storePtr(ImmPtr(ntemplate.getDenseElements()),
               Address(obj, NativeObject::offsetOfElements()));
    } else if (ntemplate.isArrayObject()) {
      int elementsOffset = NativeObject::offsetOfFixedElements();

      // Point the elements pointer at the inline fixed-element storage.
      computeEffectiveAddress(Address(obj, elementsOffset), temp);
      storePtr(temp, Address(obj, NativeObject::offsetOfElements()));

      // Fill in the elements header.
      store32(
          Imm32(ntemplate.getDenseCapacity()),
          Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
      store32(Imm32(ntemplate.getDenseInitializedLength()),
              Address(obj, elementsOffset +
                               ObjectElements::offsetOfInitializedLength()));
      store32(Imm32(ntemplate.getArrayLength()),
              Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
      store32(Imm32(ntemplate.convertDoubleElements()
                        ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
                        : 0),
              Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
      MOZ_ASSERT(!ntemplate.hasPrivate());
    } else if (ntemplate.isArgumentsObject()) {
      // The caller will initialize the reserved slots.
      MOZ_ASSERT(!initContents);
      MOZ_ASSERT(!ntemplate.hasPrivate());
      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));
    } else {
      // If the target type could be a TypedArray that maps shared memory
      // then this would need to store emptyObjectElementsShared in that case.
      MOZ_ASSERT(!ntemplate.isSharedMemory());

      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));

      initGCSlots(obj, temp, ntemplate, initContents);

      if (ntemplate.hasPrivate() && !ntemplate.isTypedArrayObject()) {
        uint32_t nfixed = ntemplate.numFixedSlots();
        Address privateSlot(obj, NativeObject::getPrivateDataOffset(nfixed));
        if (ntemplate.isRegExpObject()) {
          // RegExpObject stores a GC thing (RegExpShared*) in its
          // private slot, so we have to use ImmGCPtr.
          storePtr(ImmGCPtr(ntemplate.regExpShared()), privateSlot);
        } else {
          storePtr(ImmPtr(ntemplate.getPrivate()), privateSlot);
        }
      }
    }
  } else {
    MOZ_CRASH("Unknown object");
  }

#ifdef JS_GC_PROBES
  // Notify GC probes of the new object via an ABI call, preserving all
  // volatile registers around the call.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(obj);
  Register temp2 = regs.takeAnyGeneral();

  setupUnalignedABICall(temp2);
  passABIArg(obj);
  callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceCreateObject));

  PopRegsInMask(save);
#endif
}
1276
compareStrings(JSOp op,Register left,Register right,Register result,Label * fail)1277 void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
1278 Register result, Label* fail) {
1279 MOZ_ASSERT(left != result);
1280 MOZ_ASSERT(right != result);
1281 MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));
1282
1283 Label notPointerEqual;
1284 // If operands point to the same instance, the strings are trivially equal.
1285 branchPtr(Assembler::NotEqual, left, right,
1286 IsEqualityOp(op) ? ¬PointerEqual : fail);
1287 move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
1288 op == JSOp::Ge),
1289 result);
1290
1291 if (IsEqualityOp(op)) {
1292 Label done;
1293 jump(&done);
1294
1295 bind(¬PointerEqual);
1296
1297 Label leftIsNotAtom;
1298 Label setNotEqualResult;
1299 // Atoms cannot be equal to each other if they point to different strings.
1300 Imm32 atomBit(JSString::ATOM_BIT);
1301 branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()),
1302 atomBit, &leftIsNotAtom);
1303 branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()),
1304 atomBit, &setNotEqualResult);
1305
1306 bind(&leftIsNotAtom);
1307 // Strings of different length can never be equal.
1308 loadStringLength(left, result);
1309 branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
1310 result, fail);
1311
1312 bind(&setNotEqualResult);
1313 move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);
1314
1315 bind(&done);
1316 }
1317 }
1318
// Load a pointer to |str|'s character storage (inline or out-of-line) into
// |dest|. With Spectre mitigations enabled, non-linear (or, for TwoByte,
// Latin1) strings poison the pointer to block speculative reads.
void MacroAssembler::loadStringChars(Register str, Register dest,
                                     CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    if (encoding == CharEncoding::Latin1) {
      // If the string is a rope, zero the |str| register. The code below
      // depends on str->flags so this should block speculative execution.
      movePtr(ImmWord(0), dest);
      test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::LINEAR_BIT), dest, str);
    } else {
      // If we're loading TwoByte chars, there's an additional risk:
      // if the string has Latin1 chars, we could read out-of-bounds. To
      // prevent this, we check both the Linear and Latin1 bits. We don't
      // have a scratch register, so we use these flags also to block
      // speculative execution, similar to the use of 0 above.
      MOZ_ASSERT(encoding == CharEncoding::TwoByte);
      static constexpr uint32_t Mask =
          JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
      static_assert(Mask < 1024,
                    "Mask should be a small, near-null value to ensure we "
                    "block speculative execution when it's used as string "
                    "pointer");
      move32(Imm32(Mask), dest);
      and32(Address(str, JSString::offsetOfFlags()), dest);
      cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
                   str);
    }
  }

  // Load the inline chars.
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);

  // If it's not an inline string, load the non-inline chars. Use a
  // conditional move to prevent speculative execution.
  test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                Imm32(JSString::INLINE_CHARS_BIT),
                Address(str, JSString::offsetOfNonInlineChars()), dest);
}
1360
// Load |str|'s out-of-line character pointer into |dest|. The caller must
// know the string stores non-inline chars of the given encoding; with Spectre
// mitigations enabled, strings that don't match are poisoned to a near-null
// pointer before the load.
void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
                                              CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is a rope, has inline chars, or has a different
    // character encoding, set str to a near-null value to prevent
    // speculative execution below (when reading str->nonInlineChars).

    static constexpr uint32_t Mask = JSString::LINEAR_BIT |
                                     JSString::INLINE_CHARS_BIT |
                                     JSString::LATIN1_CHARS_BIT;
    static_assert(Mask < 1024,
                  "Mask should be a small, near-null value to ensure we "
                  "block speculative execution when it's used as string "
                  "pointer");

    // A valid string must be linear and, for Latin1 loads, Latin1-encoded.
    uint32_t expectedBits = JSString::LINEAR_BIT;
    if (encoding == CharEncoding::Latin1) {
      expectedBits |= JSString::LATIN1_CHARS_BIT;
    }

    move32(Imm32(Mask), dest);
    and32(Address(str, JSString::offsetOfFlags()), dest);

    cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
  }

  loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}
1391
storeNonInlineStringChars(Register chars,Register str)1392 void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
1393 MOZ_ASSERT(chars != str);
1394 storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
1395 }
1396
loadInlineStringCharsForStore(Register str,Register dest)1397 void MacroAssembler::loadInlineStringCharsForStore(Register str,
1398 Register dest) {
1399 computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
1400 dest);
1401 }
1402
loadInlineStringChars(Register str,Register dest,CharEncoding encoding)1403 void MacroAssembler::loadInlineStringChars(Register str, Register dest,
1404 CharEncoding encoding) {
1405 MOZ_ASSERT(str != dest);
1406
1407 if (JitOptions.spectreStringMitigations) {
1408 // Making this Spectre-safe is a bit complicated: using
1409 // computeEffectiveAddress and then zeroing the output register if
1410 // non-inline is not sufficient: when the index is very large, it would
1411 // allow reading |nullptr + index|. Just fall back to loadStringChars
1412 // for now.
1413 loadStringChars(str, dest, encoding);
1414 } else {
1415 computeEffectiveAddress(
1416 Address(str, JSInlineString::offsetOfInlineStorage()), dest);
1417 }
1418 }
1419
// Load the left child of rope |str| into |dest|. With Spectre mitigations,
// a non-rope input leaves |dest| zeroed instead of reading the field.
void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Zero the output register if the input was not a rope.
    movePtr(ImmWord(0), dest);
    test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::LINEAR_BIT),
                  Address(str, JSRope::offsetOfLeft()), dest);
  } else {
    loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
  }
}
1433
storeRopeChildren(Register left,Register right,Register str)1434 void MacroAssembler::storeRopeChildren(Register left, Register right,
1435 Register str) {
1436 storePtr(left, Address(str, JSRope::offsetOfLeft()));
1437 storePtr(right, Address(str, JSRope::offsetOfRight()));
1438 }
1439
// Load the base string of dependent string |str| into |dest|. With Spectre
// mitigations, a non-dependent input poisons |str| to zero first so the
// field read cannot be speculatively abused.
void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is not a dependent string, zero the |str| register.
    // The code below loads str->base so this should block speculative
    // execution.
    movePtr(ImmWord(0), dest);
    test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::DEPENDENT_BIT), dest, str);
  }

  loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
}
1454
storeDependentStringBase(Register base,Register str)1455 void MacroAssembler::storeDependentStringBase(Register base, Register str) {
1456 storePtr(base, Address(str, JSDependentString::offsetOfBase()));
1457 }
1458
loadStringChar(Register str,Register index,Register output,Register scratch,Label * fail)1459 void MacroAssembler::loadStringChar(Register str, Register index,
1460 Register output, Register scratch,
1461 Label* fail) {
1462 MOZ_ASSERT(str != output);
1463 MOZ_ASSERT(str != index);
1464 MOZ_ASSERT(index != output);
1465 MOZ_ASSERT(output != scratch);
1466
1467 movePtr(str, output);
1468
1469 // This follows JSString::getChar.
1470 Label notRope;
1471 branchIfNotRope(str, ¬Rope);
1472
1473 loadRopeLeftChild(str, output);
1474
1475 // Check if the index is contained in the leftChild.
1476 // Todo: Handle index in the rightChild.
1477 spectreBoundsCheck32(index, Address(output, JSString::offsetOfLength()),
1478 scratch, fail);
1479
1480 // If the left side is another rope, give up.
1481 branchIfRope(output, fail);
1482
1483 bind(¬Rope);
1484
1485 Label isLatin1, done;
1486 // We have to check the left/right side for ropes,
1487 // because a TwoByte rope might have a Latin1 child.
1488 branchLatin1String(output, &isLatin1);
1489 loadStringChars(output, scratch, CharEncoding::TwoByte);
1490 loadChar(scratch, index, output, CharEncoding::TwoByte);
1491 jump(&done);
1492
1493 bind(&isLatin1);
1494 loadStringChars(output, scratch, CharEncoding::Latin1);
1495 loadChar(scratch, index, output, CharEncoding::Latin1);
1496
1497 bind(&done);
1498 }
1499
// Load |str|'s cached integer index value (strings like "123" cache the
// parsed index in their flags word) into |dest|, jumping to |fail| if no
// index value is cached.
void MacroAssembler::loadStringIndexValue(Register str, Register dest,
                                          Label* fail) {
  MOZ_ASSERT(str != dest);

  load32(Address(str, JSString::offsetOfFlags()), dest);

  // Does not have a cached index value.
  branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);

  // Extract the index.
  rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}
1512
loadChar(Register chars,Register index,Register dest,CharEncoding encoding,int32_t offset)1513 void MacroAssembler::loadChar(Register chars, Register index, Register dest,
1514 CharEncoding encoding, int32_t offset /* = 0 */) {
1515 if (encoding == CharEncoding::Latin1) {
1516 loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
1517 } else {
1518 loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
1519 }
1520 }
1521
addToCharPtr(Register chars,Register index,CharEncoding encoding)1522 void MacroAssembler::addToCharPtr(Register chars, Register index,
1523 CharEncoding encoding) {
1524 if (encoding == CharEncoding::Latin1) {
1525 static_assert(sizeof(char) == 1,
1526 "Latin-1 string index shouldn't need scaling");
1527 addPtr(index, chars);
1528 } else {
1529 computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
1530 }
1531 }
1532
// Load a pointer to |bigInt|'s digit storage (inline or heap) into |digits|.
void MacroAssembler::loadBigIntDigits(Register bigInt, Register digits) {
  MOZ_ASSERT(digits != bigInt);

  // Load the inline digits.
  computeEffectiveAddress(Address(bigInt, BigInt::offsetOfInlineDigits()),
                          digits);

  // If inline digits aren't used, load the heap digits. Use a conditional move
  // to prevent speculative execution.
  cmp32LoadPtr(Assembler::GreaterThan,
               Address(bigInt, BigInt::offsetOfLength()),
               Imm32(int32_t(BigInt::inlineDigitsLength())),
               Address(bigInt, BigInt::offsetOfHeapDigits()), digits);
}
1547
loadBigInt64(Register bigInt,Register64 dest)1548 void MacroAssembler::loadBigInt64(Register bigInt, Register64 dest) {
1549 // This code follows the implementation of |BigInt::toUint64()|. We're also
1550 // using it for inline callers of |BigInt::toInt64()|, which works, because
1551 // all supported Jit architectures use a two's complement representation for
1552 // int64 values, which means the WrapToSigned call in toInt64() is a no-op.
1553
1554 Label done, nonZero;
1555
1556 branch32(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
1557 Imm32(0), &nonZero);
1558 {
1559 move64(Imm64(0), dest);
1560 jump(&done);
1561 }
1562 bind(&nonZero);
1563
1564 #ifdef JS_PUNBOX64
1565 Register digits = dest.reg;
1566 #else
1567 Register digits = dest.high;
1568 #endif
1569
1570 loadBigIntDigits(bigInt, digits);
1571
1572 #if JS_PUNBOX64
1573 // Load the first digit into the destination register.
1574 load64(Address(digits, 0), dest);
1575 #else
1576 // Load the first digit into the destination register's low value.
1577 load32(Address(digits, 0), dest.low);
1578
1579 // And conditionally load the second digit into the high value register.
1580 Label twoDigits, digitsDone;
1581 branch32(Assembler::GreaterThan, Address(bigInt, BigInt::offsetOfLength()),
1582 Imm32(1), &twoDigits);
1583 {
1584 move32(Imm32(0), dest.high);
1585 jump(&digitsDone);
1586 }
1587 {
1588 bind(&twoDigits);
1589 load32(Address(digits, sizeof(BigInt::Digit)), dest.high);
1590 }
1591 bind(&digitsDone);
1592 #endif
1593
1594 branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
1595 Imm32(BigInt::signBitMask()), &done);
1596 neg64(dest);
1597
1598 bind(&done);
1599 }
1600
// Load |bigInt|'s first digit (its magnitude's least significant word) into
// |dest|, or zero if the BigInt has no digits.
void MacroAssembler::loadFirstBigIntDigitOrZero(Register bigInt,
                                                Register dest) {
  Label done, nonZero;
  branch32(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(0), &nonZero);
  {
    movePtr(ImmWord(0), dest);
    jump(&done);
  }
  bind(&nonZero);

  loadBigIntDigits(bigInt, dest);

  // Load the first digit into the destination register.
  loadPtr(Address(dest, 0), dest);

  bind(&done);
}
1619
// Initialize the freshly allocated |bigInt| from the int64/uint64 value in
// |val| (interpreted per |type|). Clobbers |val| (it is negated for negative
// BigInt64 inputs to store the magnitude).
void MacroAssembler::initializeBigInt64(Scalar::Type type, Register bigInt,
                                        Register64 val) {
  MOZ_ASSERT(Scalar::isBigIntType(type));

  // Start with cleared flags (positive sign).
  store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));

  Label done, nonZero;
  branch64(Assembler::NotEqual, val, Imm64(0), &nonZero);
  {
    // Zero is represented as a zero-length BigInt.
    store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
    jump(&done);
  }
  bind(&nonZero);

  if (type == Scalar::BigInt64) {
    // Set the sign-bit for negative values and then continue with the two's
    // complement.
    Label isPositive;
    branch64(Assembler::GreaterThan, val, Imm64(0), &isPositive);
    {
      store32(Imm32(BigInt::signBitMask()),
              Address(bigInt, BigInt::offsetOfFlags()));
      neg64(val);
    }
    bind(&isPositive);
  }

  store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));

  static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
                "BigInt Digit size matches uintptr_t, so there's a single "
                "store on 64-bit and up to two stores on 32-bit");

#ifndef JS_PUNBOX64
  // On 32-bit, bump the length to two digits when the high word is non-zero.
  Label singleDigit;
  branchTest32(Assembler::Zero, val.high, val.high, &singleDigit);
  store32(Imm32(2), Address(bigInt, BigInt::offsetOfLength()));
  bind(&singleDigit);

  // We can perform a single store64 on 32-bit platforms, because inline
  // storage can store at least two 32-bit integers.
  static_assert(BigInt::inlineDigitsLength() >= 2,
                "BigInt inline storage can store at least two digits");
#endif

  store64(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));

  bind(&done);
}
1669
// Classify |obj| for the |typeof| operator, jumping to exactly one of the
// provided labels: |isObject|, |isCallable|, |isUndefined| (for classes that
// emulate undefined), or |slow| when the answer needs the VM (proxies).
// Clobbers |scratch|.
void MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
                                  Label* isObject, Label* isCallable,
                                  Label* isUndefined) {
  loadObjClassUnsafe(obj, scratch);

  // Proxies can emulate undefined and have complex isCallable behavior.
  branchTestClassIsProxy(true, scratch, slow);

  // JSFunctions are always callable.
  branchPtr(Assembler::Equal, scratch, ImmPtr(&JSFunction::class_), isCallable);

  // Objects that emulate undefined.
  Address flags(scratch, JSClass::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED),
               isUndefined);

  // Handle classes with a call hook.
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), isObject);

  loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClassOps, call)),
            ImmPtr(nullptr), isObject);

  // Non-function class with a call hook: callable.
  jump(isCallable);
}
1696
// Compute into |output| a 0/1 flag telling whether |obj| is callable
// (isCallable == true) or a constructor (isCallable == false). Proxies are
// diverted to |isProxy| because their answer cannot be computed from the
// class alone. |output| doubles as a scratch register throughout.
void MacroAssembler::isCallableOrConstructor(bool isCallable, Register obj,
                                             Register output, Label* isProxy) {
  Label notFunction, hasCOps, done;
  loadObjClassUnsafe(obj, output);

  // An object is callable iff:
  //   is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
  // An object is constructor iff:
  //   ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
  //    (getClass()->cOps && getClass()->cOps->construct)).
  branchPtr(Assembler::NotEqual, output, ImmPtr(&JSFunction::class_),
            &notFunction);
  if (isCallable) {
    // JSFunctions are always callable.
    move32(Imm32(1), output);
  } else {
    // Extract the CONSTRUCTOR bit from the function's flags word and
    // normalize it to 0/1 via shift-and-mask.
    static_assert(mozilla::IsPowerOfTwo(uint32_t(FunctionFlags::CONSTRUCTOR)),
                  "FunctionFlags::CONSTRUCTOR has only one bit set");

    load16ZeroExtend(Address(obj, JSFunction::offsetOfFlags()), output);
    rshift32(Imm32(mozilla::FloorLog2(uint32_t(FunctionFlags::CONSTRUCTOR))),
             output);
    and32(Imm32(1), output);
  }
  jump(&done);

  bind(&notFunction);

  // Just skim proxies off. Their notion of isCallable()/isConstructor() is
  // more complicated.
  branchTestClassIsProxy(true, output, isProxy);

  // No cOps table at all means neither a call nor a construct hook.
  branchPtr(Assembler::NonZero, Address(output, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), &hasCOps);
  move32(Imm32(0), output);
  jump(&done);

  bind(&hasCOps);
  // Test the relevant hook (call vs. construct) for non-null.
  loadPtr(Address(output, offsetof(JSClass, cOps)), output);
  size_t opsOffset =
      isCallable ? offsetof(JSClassOps, call) : offsetof(JSClassOps, construct);
  cmpPtrSet(Assembler::NonZero, Address(output, opsOffset), ImmPtr(nullptr),
            output);

  bind(&done);
}
1742
// Load the runtime's main JSContext pointer (a compile-time constant for
// this compilation) into |dest|.
void MacroAssembler::loadJSContext(Register dest) {
  JitContext* jcx = GetJitContext();
  movePtr(ImmPtr(jcx->runtime->mainContextPtr()), dest);
}
1747
ContextRealmPtr()1748 static const uint8_t* ContextRealmPtr() {
1749 return (
1750 static_cast<const uint8_t*>(GetJitContext()->runtime->mainContextPtr()) +
1751 JSContext::offsetOfRealm());
1752 }
1753
// Store the realm pointer held in |realm| into the main context's realm
// slot, making it the current realm.
void MacroAssembler::switchToRealm(Register realm) {
  storePtr(realm, AbsoluteAddress(ContextRealmPtr()));
}
1757
// Switch to a realm known at compile time. Clobbers |scratch| with the
// realm pointer before delegating to the register variant.
void MacroAssembler::switchToRealm(const void* realm, Register scratch) {
  MOZ_ASSERT(realm);

  movePtr(ImmPtr(realm), scratch);
  switchToRealm(scratch);
}
1764
// Switch the current realm to |obj|'s realm, reached through the object's
// group (obj->group->realm). Clobbers |scratch|.
void MacroAssembler::switchToObjectRealm(Register obj, Register scratch) {
  loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
  loadPtr(Address(scratch, ObjectGroup::offsetOfRealm()), scratch);
  switchToRealm(scratch);
}
1770
// Switch to the realm of the current Baseline frame, found via the frame's
// environment chain object. Clobbers |scratch|.
void MacroAssembler::switchToBaselineFrameRealm(Register scratch) {
  Address envChain(BaselineFrameReg,
                   BaselineFrame::reverseOffsetOfEnvironmentChain());
  loadPtr(envChain, scratch);
  switchToObjectRealm(scratch, scratch);
}
1777
// Switch to the realm recorded in the wasm TLS data: loads the context and
// realm out of WasmTlsReg and stores the realm directly into the context's
// realm slot. Clobbers both scratch registers.
void MacroAssembler::switchToWasmTlsRealm(Register scratch1,
                                          Register scratch2) {
  loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), scratch1);
  loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, realm)), scratch2);
  storePtr(scratch2, Address(scratch1, JSContext::offsetOfRealm()));
}
1784
// Debug-only check that the context's current realm equals |realm|; emits
// nothing in non-DEBUG builds. Clobbers |scratch|.
void MacroAssembler::debugAssertContextRealm(const void* realm,
                                             Register scratch) {
#ifdef DEBUG
  Label ok;
  movePtr(ImmPtr(realm), scratch);
  branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr()), scratch, &ok);
  assumeUnreachable("Unexpected context realm");
  bind(&ok);
#endif
}
1795
// Branch to |fail| if |group| has a TypeNewScript addendum whose preliminary
// object analysis has already been completed. Groups without a new-script
// addendum fall through without failing. Clobbers |scratch|.
void MacroAssembler::guardGroupHasUnanalyzedNewScript(Register group,
                                                      Register scratch,
                                                      Label* fail) {
  Label noNewScript;
  // Check the addendum kind bits in the group's flags word.
  load32(Address(group, ObjectGroup::offsetOfFlags()), scratch);
  and32(Imm32(OBJECT_FLAG_ADDENDUM_MASK), scratch);
  branch32(Assembler::NotEqual, scratch,
           Imm32(uint32_t(ObjectGroup::Addendum_NewScript)
                 << OBJECT_FLAG_ADDENDUM_SHIFT),
           &noNewScript);

  // Guard group->newScript()->preliminaryObjects is non-nullptr.
  loadPtr(Address(group, ObjectGroup::offsetOfAddendum()), scratch);
  branchPtr(Assembler::Equal,
            Address(scratch, TypeNewScript::offsetOfPreliminaryObjects()),
            ImmWord(0), fail);

  bind(&noNewScript);
}
1815
// Emit the shared tail code for bailing out from Ion into Baseline. On
// entry, ReturnReg holds the boolean result of the bailout-preparation call
// and |bailoutInfo| points at a BaselineBailoutInfo describing the Baseline
// frame(s) to materialize. On success, control jumps into Baseline code;
// on failure, control goes to the exception label.
void MacroAssembler::generateBailoutTail(Register scratch,
                                         Register bailoutInfo) {
  loadJSContext(scratch);
  enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);

  branchIfFalseBool(ReturnReg, exceptionLabel());

  // Finish bailing out to Baseline.
  {
    // Prepare a register set for use in this case.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()),
                  !regs.has(AsRegister(getStackPointer())));
    regs.take(bailoutInfo);

    // Reset SP to the point where clobbering starts.
    loadStackPtr(
        Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)));

    Register copyCur = regs.takeAny();
    Register copyEnd = regs.takeAny();
    Register temp = regs.takeAny();

    // Copy data onto stack. The copy runs from copyStackTop down to
    // copyStackBottom, pushing one 32-bit word at a time.
    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)),
            copyCur);
    loadPtr(
        Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)),
        copyEnd);
    {
      Label copyLoop;
      Label endOfCopy;
      bind(&copyLoop);
      branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
      subPtr(Imm32(4), copyCur);
      subFromStackPtr(Imm32(4));
      load32(Address(copyCur, 0), temp);
      store32(temp, Address(getStackPointer(), 0));
      jump(&copyLoop);
      bind(&endOfCopy);
    }

    // Enter exit frame for the FinishBailoutToBaseline call.
    load32(Address(bailoutInfo,
                   offsetof(BaselineBailoutInfo, frameSizeOfInnerMostFrame)),
           temp);
    makeFrameDescriptor(temp, FrameType::BaselineJS, ExitFrameLayout::Size());
    push(temp);
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
    // No GC things to mark on the stack, push a bare token.
    loadJSContext(scratch);
    enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);

    // Save needed values onto stack temporarily.
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));

    // Call a stub to free allocated memory and create arguments objects.
    setupUnalignedABICall(temp);
    passABIArg(bailoutInfo);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBailoutToBaseline),
                MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
    branchIfFalseBool(ReturnReg, exceptionLabel());

    // Restore values where they need to be and resume execution.
    // The two pops match the two pushes of resumeFramePtr/resumeAddr above.
    AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
    enterRegs.take(BaselineFrameReg);
    Register jitcodeReg = enterRegs.takeAny();

    pop(jitcodeReg);
    pop(BaselineFrameReg);

    // Discard exit frame.
    addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));

    jump(jitcodeReg);
  }
}
1894
// Debug-only assertion that |frameType| is one of the frame types that may
// legitimately precede a rectifier frame. Emits nothing in non-DEBUG builds.
void MacroAssembler::assertRectifierFrameParentType(Register frameType) {
#ifdef DEBUG
  {
    // Check the possible previous frame types here.
    Label checkOk;
    branch32(Assembler::Equal, frameType, Imm32(FrameType::IonJS), &checkOk);
    branch32(Assembler::Equal, frameType, Imm32(FrameType::BaselineStub),
             &checkOk);
    branch32(Assembler::Equal, frameType, Imm32(FrameType::WasmToJSJit),
             &checkOk);
    branch32(Assembler::Equal, frameType, Imm32(FrameType::CppToJSJit),
             &checkOk);
    assumeUnreachable("Unrecognized frame type preceding RectifierFrame.");
    bind(&checkOk);
  }
#endif
}
1912
// Load |func|'s raw JIT entry point into |dest| via its script. The layout
// static_assert lets this work whether the function's script slot holds a
// BaseScript or a SelfHostedLazyScript.
void MacroAssembler::loadJitCodeRaw(Register func, Register dest) {
  static_assert(BaseScript::offsetOfJitCodeRaw() ==
                    SelfHostedLazyScript::offsetOfJitCodeRaw(),
                "SelfHostedLazyScript and BaseScript must use same layout for "
                "jitCodeRaw_");
  loadPtr(Address(func, JSFunction::offsetOfScript()), dest);
  loadPtr(Address(dest, BaseScript::offsetOfJitCodeRaw()), dest);
}
1921
// Load |func|'s JIT entry that skips the argument check into |dest|.
// Debug builds assert the function has a BaseScript and that the script's
// warm-up data is an (untagged) JitScript pointer.
void MacroAssembler::loadJitCodeNoArgCheck(Register func, Register dest) {
#ifdef DEBUG
  {
    Label ok;
    int32_t flags = FunctionFlags::BASESCRIPT;
    branchTestFunctionFlags(func, flags, Assembler::NonZero, &ok);
    assumeUnreachable("Function has no BaseScript!");
    bind(&ok);
  }
#endif

  // A zero tag means the warm-up data pointer can be used directly as a
  // JitScript* without masking.
  static_assert(ScriptWarmUpData::JitScriptTag == 0,
                "Code below depends on tag value");
  Imm32 tagMask(ScriptWarmUpData::TagMask);

  // Read jitCodeSkipArgCheck.
  loadPtr(Address(func, JSFunction::offsetOfScript()), dest);
  loadPtr(Address(dest, BaseScript::offsetOfWarmUpData()), dest);
#ifdef DEBUG
  {
    Label ok;
    branchTestPtr(Assembler::Zero, dest, tagMask, &ok);
    assumeUnreachable("Function has no JitScript!");
    bind(&ok);
  }
#endif
  loadPtr(Address(dest, JitScript::offsetOfJitCodeSkipArgCheck()), dest);
}
1950
// Compute the BaselineFrame* from the frame register value |framePtr|
// (which points just past the frame) into |dest|. |framePtr| may alias
// |dest|.
void MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest) {
  if (framePtr != dest) {
    movePtr(framePtr, dest);
  }
  subPtr(Imm32(BaselineFrame::Size()), dest);
}
1957
// Jump to the runtime's shared exception-tail trampoline.
void MacroAssembler::handleFailure() {
  // Re-entry code is irrelevant because the exception will leave the
  // running function and never come back
  TrampolinePtr excTail =
      GetJitContext()->runtime->jitRuntime()->getExceptionTail();
  jump(excTail);
}
1965
#ifdef JS_MASM_VERBOSE
// C++ callee for MacroAssembler::assumeUnreachable: report the message as
// an assertion failure (does not itself abort execution).
static void AssumeUnreachable_(const char* output) {
  MOZ_ReportAssertionFailure(output, __FILE__, __LINE__);
}
#endif
1971
// Emit code that, if ever reached, reports |output| (verbose builds only)
// and then hits a breakpoint trap. Used to guard paths believed impossible.
void MacroAssembler::assumeUnreachable(const char* output) {
#ifdef JS_MASM_VERBOSE
  if (!IsCompilingWasm()) {
    // Preserve all volatile registers around the out-of-line C++ call.
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, AssumeUnreachable_), MoveOp::GENERAL,
                CheckUnsafeCallWithABI::DontCheckOther);

    PopRegsInMask(save);
  }
#endif

  breakpoint();
}
1992
// Debug-only assertion that the Int32 type test on |value| with |cond|
// succeeds; reports |output| and traps otherwise. No code in release builds.
template <typename T>
void MacroAssembler::assertTestInt32(Condition cond, const T& value,
                                     const char* output) {
#ifdef DEBUG
  Label ok;
  branchTestInt32(cond, value, &ok);
  assumeUnreachable(output);
  bind(&ok);
#endif
}

// Explicit instantiation for Address operands (the only form used here).
template void MacroAssembler::assertTestInt32(Condition, const Address&,
                                              const char*);
2006
#ifdef JS_MASM_VERBOSE
// C++ callee for MacroAssembler::printf(const char*): print a fixed string.
static void Printf0_(const char* output) {
  AutoUnsafeCallWithABI unsafe;

  // Use stderr instead of stdout because this is only used for debug
  // output. stderr is less likely to interfere with the program's normal
  // output, and it's always unbuffered.
  fprintf(stderr, "%s", output);
}
#endif
2017
// Emit code that prints the fixed string |output| at runtime (verbose
// builds only; a no-op otherwise). Preserves all volatile registers.
void MacroAssembler::printf(const char* output) {
#ifdef JS_MASM_VERBOSE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  Register temp = regs.takeAnyGeneral();

  setupUnalignedABICall(temp);
  movePtr(ImmPtr(output), temp);
  passABIArg(temp);
  callWithABI(JS_FUNC_TO_DATA_PTR(void*, Printf0_));

  PopRegsInMask(save);
#endif
}
2034
#ifdef JS_MASM_VERBOSE
// C++ callee for MacroAssembler::printf(const char*, Register): format
// |output| with the single uintptr_t |value| and print the result.
static void Printf1_(const char* output, uintptr_t value) {
  AutoUnsafeCallWithABI unsafe;
  AutoEnterOOMUnsafeRegion oomUnsafe;
  js::UniqueChars line = JS_sprintf_append(nullptr, output, value);
  if (!line) {
    oomUnsafe.crash("OOM at masm.printf");
  }
  fprintf(stderr, "%s", line.get());
}
#endif
2046
// Emit code that prints |output| formatted with the runtime value in
// |value| (verbose builds only; a no-op otherwise). Preserves all volatile
// registers, including |value|.
void MacroAssembler::printf(const char* output, Register value) {
#ifdef JS_MASM_VERBOSE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  // Keep |value| out of the scratch pool so it survives until the call.
  regs.takeUnchecked(value);

  Register temp = regs.takeAnyGeneral();

  setupUnalignedABICall(temp);
  movePtr(ImmPtr(output), temp);
  passABIArg(temp);
  passABIArg(value);
  callWithABI(JS_FUNC_TO_DATA_PTR(void*, Printf1_));

  PopRegsInMask(save);
#endif
}
2066
#ifdef JS_TRACE_LOGGING
// Emit a call logging the start of trace event |textId| (a compile-time
// constant) on |logger|. Skipped entirely when the id is disabled, unless
// |force| is set. Preserves all volatile registers.
void MacroAssembler::tracelogStartId(Register logger, uint32_t textId,
                                     bool force) {
  if (!force && !TraceLogTextIdEnabled(textId)) {
    return;
  }

  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);
  regs.takeUnchecked(logger);

  Register temp = regs.takeAnyGeneral();

  setupUnalignedABICall(temp);
  passABIArg(logger);
  move32(Imm32(textId), temp);
  passABIArg(temp);
  callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStartEventPrivate),
              MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);

  PopRegsInMask(save);
}
2090
// Emit a call logging the start of the trace event whose id is in the
// |textId| register. Preserves all volatile registers.
void MacroAssembler::tracelogStartId(Register logger, Register textId) {
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);
  regs.takeUnchecked(logger);
  regs.takeUnchecked(textId);

  Register temp = regs.takeAnyGeneral();

  setupUnalignedABICall(temp);
  passABIArg(logger);
  passABIArg(textId);
  callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStartEventPrivate),
              MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);

  PopRegsInMask(save);
}
2108
// Emit a call logging the start of the TraceLoggerEvent pointed to by
// |event|. Preserves all volatile registers.
void MacroAssembler::tracelogStartEvent(Register logger, Register event) {
  // Pin down the exact TraceLogStartEvent overload to call through the ABI.
  void (&TraceLogFunc)(TraceLoggerThread*, const TraceLoggerEvent&) =
      TraceLogStartEvent;

  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);
  regs.takeUnchecked(logger);
  regs.takeUnchecked(event);

  Register temp = regs.takeAnyGeneral();

  setupUnalignedABICall(temp);
  passABIArg(logger);
  passABIArg(event);
  callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogFunc), MoveOp::GENERAL,
              CheckUnsafeCallWithABI::DontCheckOther);

  PopRegsInMask(save);
}
2129
// Emit a call logging the end of trace event |textId| (a compile-time
// constant). Skipped entirely when the id is disabled, unless |force| is
// set. Preserves all volatile registers.
void MacroAssembler::tracelogStopId(Register logger, uint32_t textId,
                                    bool force) {
  if (!force && !TraceLogTextIdEnabled(textId)) {
    return;
  }

  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);
  regs.takeUnchecked(logger);

  Register temp = regs.takeAnyGeneral();

  setupUnalignedABICall(temp);
  passABIArg(logger);
  move32(Imm32(textId), temp);
  passABIArg(temp);

  callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStopEventPrivate),
              MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);

  PopRegsInMask(save);
}
2153
// Emit a call logging the end of the trace event whose id is in the
// |textId| register. Preserves all volatile registers.
void MacroAssembler::tracelogStopId(Register logger, Register textId) {
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);
  regs.takeUnchecked(logger);
  regs.takeUnchecked(textId);

  Register temp = regs.takeAnyGeneral();

  setupUnalignedABICall(temp);
  passABIArg(logger);
  passABIArg(textId);
  callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStopEventPrivate),
              MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);

  PopRegsInMask(save);
}
#endif
2172
// If the Value at |address| is an Int32, convert it in place to a boxed
// Double; otherwise jump to |done|. Clobbers |scratch|.
void MacroAssembler::convertInt32ValueToDouble(const Address& address,
                                               Register scratch, Label* done) {
  branchTestInt32(Assembler::NotEqual, address, done);
  unboxInt32(address, scratch);
  ScratchDoubleScope fpscratch(*this);
  convertInt32ToDouble(scratch, fpscratch);
  storeDouble(fpscratch, address);
}
2181
// If |val| holds an Int32, rebox it in place as a Double; non-Int32 values
// are left untouched. Uses |val|'s scratch register for the unboxed int.
void MacroAssembler::convertInt32ValueToDouble(ValueOperand val) {
  Label done;
  branchTestInt32(Assembler::NotEqual, val, &done);
  unboxInt32(val, val.scratchReg());
  ScratchDoubleScope fpscratch(*this);
  convertInt32ToDouble(val.scratchReg(), fpscratch);
  boxDouble(fpscratch, val, fpscratch);
  bind(&done);
}
2191
// Convert |value| to a double or float32 (per |outputType|) in |output|.
// Handles double, int32, boolean, null (-> 0) and undefined (-> NaN)
// inline; any other type (strings, objects, ...) jumps to |fail|.
void MacroAssembler::convertValueToFloatingPoint(ValueOperand value,
                                                 FloatRegister output,
                                                 Label* fail,
                                                 MIRType outputType) {
  Label isDouble, isInt32, isBool, isNull, done;

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    branchTestDouble(Assembler::Equal, tag, &isDouble);
    branchTestInt32(Assembler::Equal, tag, &isInt32);
    branchTestBoolean(Assembler::Equal, tag, &isBool);
    branchTestNull(Assembler::Equal, tag, &isNull);
    branchTestUndefined(Assembler::NotEqual, tag, fail);
  }

  // fall-through: undefined
  loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output,
                            outputType);
  jump(&done);

  bind(&isNull);
  loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
  jump(&done);

  bind(&isBool);
  boolValueToFloatingPoint(value, output, outputType);
  jump(&done);

  bind(&isInt32);
  int32ValueToFloatingPoint(value, output, outputType);
  jump(&done);

  // On some non-multiAlias platforms, unboxDouble may use the scratch register,
  // so do not merge code paths here.
  bind(&isDouble);
  if (outputType == MIRType::Float32 && hasMultiAlias()) {
    ScratchDoubleScope tmp(*this);
    unboxDouble(value, tmp);
    convertDoubleToFloat32(tmp, output);
  } else {
    // Unbox into the double alias of |output|, then narrow in place if a
    // float32 result was requested.
    FloatRegister tmp = output.asDouble();
    unboxDouble(value, tmp);
    if (outputType == MIRType::Float32) {
      convertDoubleToFloat32(tmp, output);
    }
  }

  bind(&done);
}
2243
// Out-of-line slow path for truncating |src| to an int32 in |dest| via a
// C++ call to ToInt32 (or the wasm builtin when |compilingWasm|). If
// |widenFloatToDouble|, |src| holds a float32 that is first widened; on
// x86/x64 the original single register is preserved across the call.
void MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest,
                                           bool widenFloatToDouble,
                                           bool compilingWasm,
                                           wasm::BytecodeOffset callOffset) {
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
  ScratchDoubleScope fpscratch(*this);
  if (widenFloatToDouble) {
    convertFloat32ToDouble(src, fpscratch);
    src = fpscratch;
  }
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  FloatRegister srcSingle;
  if (widenFloatToDouble) {
    MOZ_ASSERT(src.isSingle());
    srcSingle = src;
    src = src.asDouble();
    // Save the single value; the widened double occupies the same register.
    Push(srcSingle);
    convertFloat32ToDouble(srcSingle, src);
  }
#else
  // Also see below
  MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif

  MOZ_ASSERT(src.isDouble());

  if (compilingWasm) {
    setupWasmABICall();
    passABIArg(src, MoveOp::DOUBLE);
    callWithABI(callOffset, wasm::SymbolicAddress::ToInt32);
  } else {
    setupUnalignedABICall(dest);
    passABIArg(src, MoveOp::DOUBLE);
    callWithABI(mozilla::BitwiseCast<void*, int32_t (*)(double)>(JS::ToInt32),
                MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
  }
  storeCallInt32Result(dest);

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
  // Nothing
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
  if (widenFloatToDouble) {
    Pop(srcSingle);
  }
#else
  MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif
}
2294
// Convert the double in |src| to an int32 in |output| according to
// |behavior|. Truncating conversions branch to |truncateFail| (or |fail|
// when null) on values they cannot handle inline; other conversions branch
// to |fail|. |temp| is only used for the ClampToUint8 case.
void MacroAssembler::convertDoubleToInt(FloatRegister src, Register output,
                                        FloatRegister temp, Label* truncateFail,
                                        Label* fail,
                                        IntConversionBehavior behavior) {
  switch (behavior) {
    case IntConversionBehavior::Normal:
    case IntConversionBehavior::NegativeZeroCheck:
      convertDoubleToInt32(
          src, output, fail,
          behavior == IntConversionBehavior::NegativeZeroCheck);
      break;
    case IntConversionBehavior::Truncate:
      branchTruncateDoubleMaybeModUint32(src, output,
                                         truncateFail ? truncateFail : fail);
      break;
    case IntConversionBehavior::TruncateNoWrap:
      branchTruncateDoubleToInt32(src, output,
                                  truncateFail ? truncateFail : fail);
      break;
    case IntConversionBehavior::ClampToUint8:
      // Clamping clobbers the input register, so use a temp.
      if (src != temp) {
        moveDouble(src, temp);
      }
      clampDoubleToUint8(temp, output);
      break;
  }
}
2323
// Convert |value| to an int32 in |output| according to |behavior| and
// |conversion|. |maybeInput| (if non-null) lets type tests be skipped for
// statically-known input types. String handling is delegated to the
// out-of-line |handleStringEntry|/|handleStringRejoin| pair when both are
// provided and the behavior is truncating/clamping; |stringReg| receives
// the unboxed string and |temp| carries doubles. Unsupported inputs branch
// to |fail|; |truncateDoubleSlow| is the slow path for double truncation.
void MacroAssembler::convertValueToInt(
    ValueOperand value, MDefinition* maybeInput, Label* handleStringEntry,
    Label* handleStringRejoin, Label* truncateDoubleSlow, Register stringReg,
    FloatRegister temp, Register output, Label* fail,
    IntConversionBehavior behavior, IntConversionInputKind conversion) {
  Label done, isInt32, isBool, isDouble, isNull, isString;

  bool handleStrings = (behavior == IntConversionBehavior::Truncate ||
                        behavior == IntConversionBehavior::ClampToUint8) &&
                       handleStringEntry && handleStringRejoin;

  MOZ_ASSERT_IF(handleStrings, conversion == IntConversionInputKind::Any);

  {
    ScratchTagScope tag(*this, value);
    splitTagForTest(value, tag);

    maybeBranchTestType(MIRType::Int32, maybeInput, tag, &isInt32);
    if (conversion == IntConversionInputKind::Any ||
        conversion == IntConversionInputKind::NumbersOrBoolsOnly) {
      maybeBranchTestType(MIRType::Boolean, maybeInput, tag, &isBool);
    }
    maybeBranchTestType(MIRType::Double, maybeInput, tag, &isDouble);

    if (conversion == IntConversionInputKind::Any) {
      // If we are not truncating, we fail for anything that's not
      // null. Otherwise we might be able to handle strings and undefined.
      switch (behavior) {
        case IntConversionBehavior::Normal:
        case IntConversionBehavior::NegativeZeroCheck:
          branchTestNull(Assembler::NotEqual, tag, fail);
          break;

        case IntConversionBehavior::Truncate:
        case IntConversionBehavior::TruncateNoWrap:
        case IntConversionBehavior::ClampToUint8:
          maybeBranchTestType(MIRType::Null, maybeInput, tag, &isNull);
          if (handleStrings) {
            maybeBranchTestType(MIRType::String, maybeInput, tag, &isString);
          }
          branchTestUndefined(Assembler::NotEqual, tag, fail);
          break;
      }
    } else {
      // Only numbers (and possibly bools) were acceptable; anything that
      // reaches here is a failure.
      jump(fail);
    }
  }

  // The value is null or undefined in truncation contexts - just emit 0.
  // (The undefined case falls through from the tag tests above.)
  if (isNull.used()) {
    bind(&isNull);
  }
  mov(ImmWord(0), output);
  jump(&done);

  // |output| needs to be different from |stringReg| to load string indices.
  bool handleStringIndices = handleStrings && output != stringReg;

  // First try loading a string index. If that fails, try converting a string
  // into a double, then jump to the double case.
  Label handleStringIndex;
  if (handleStrings) {
    bind(&isString);
    unboxString(value, stringReg);
    if (handleStringIndices) {
      loadStringIndexValue(stringReg, output, handleStringEntry);
      jump(&handleStringIndex);
    } else {
      jump(handleStringEntry);
    }
  }

  // Try converting double into integer.
  if (isDouble.used() || handleStrings) {
    if (isDouble.used()) {
      bind(&isDouble);
      unboxDouble(value, temp);
    }

    // Out-of-line string-to-double conversion rejoins here with the double
    // already in |temp|.
    if (handleStrings) {
      bind(handleStringRejoin);
    }

    convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
    jump(&done);
  }

  // Just unbox a bool, the result is 0 or 1.
  if (isBool.used()) {
    bind(&isBool);
    unboxBoolean(value, output);
    jump(&done);
  }

  // Integers can be unboxed.
  if (isInt32.used() || handleStringIndices) {
    if (isInt32.used()) {
      bind(&isInt32);
      unboxInt32(value, output);
    }

    if (handleStringIndices) {
      bind(&handleStringIndex);
    }

    if (behavior == IntConversionBehavior::ClampToUint8) {
      clampIntToUint8(output);
    }
  }

  bind(&done);
}
2436
// Finalize code generation: emit the shared failure path if any branch
// used it, run platform-specific finishing, and enforce the process-wide
// code-size limit.
void MacroAssembler::finish() {
  if (failureLabel_.used()) {
    bind(&failureLabel_);
    handleFailure();
  }

  MacroAssemblerSpecific::finish();

  MOZ_RELEASE_ASSERT(
      size() <= MaxCodeBytesPerProcess,
      "AssemblerBuffer should ensure we don't exceed MaxCodeBytesPerProcess");

  // bytesNeeded() can exceed size(); flag OOM rather than over-allocate.
  if (bytesNeeded() > MaxCodeBytesPerProcess) {
    setOOM();
  }
}
2453
// Final linking step: patch recorded profiler call sites into |code|.
void MacroAssembler::link(JitCode* code) {
  MOZ_ASSERT(!oom());
  linkProfilerCallSites(code);
}
2458
// RAII helper that, when profiling instrumentation is enabled, records the
// upcoming call site's address into the profiling activation's
// lastProfilingCallSite slot. The patchable mov is resolved later by
// linkProfilerCallSites(). Emits nothing if instrumentation is off.
MacroAssembler::AutoProfilerCallInstrumentation::
    AutoProfilerCallInstrumentation(
        MacroAssembler& masm MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL) {
  MOZ_GUARD_OBJECT_NOTIFIER_INIT;
  if (!masm.emitProfilingInstrumentation_) {
    return;
  }

  Register reg = CallTempReg0;
  Register reg2 = CallTempReg1;
  masm.push(reg);
  masm.push(reg2);

  // -1 is a placeholder; the real call-site address is patched in at link
  // time (see linkProfilerCallSites).
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
  masm.loadJSContext(reg2);
  masm.loadPtr(Address(reg2, offsetof(JSContext, profilingActivation_)), reg2);
  masm.storePtr(reg,
                Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));

  masm.appendProfilerCallSite(label);

  masm.pop(reg2);
  masm.pop(reg);
}
2483
// Patch every recorded profiler call site in |code|, replacing the -1
// placeholder written by AutoProfilerCallInstrumentation with the site's
// actual code address.
void MacroAssembler::linkProfilerCallSites(JitCode* code) {
  for (size_t i = 0; i < profilerCallSites_.length(); i++) {
    CodeOffset offset = profilerCallSites_[i];
    CodeLocationLabel location(code, offset);
    PatchDataWithValueCheck(location, ImmPtr(location.raw()),
                            ImmPtr((void*)-1));
  }
}
2492
// Align the stack for a JIT call with a dynamic argument count in |nargs|,
// so that the eventual return address is JitStackAlignment-aligned.
// |countIncludesThis| says whether |nargs| already counts the |this| slot.
// No-op when values are already naturally aligned.
void MacroAssembler::alignJitStackBasedOnNArgs(Register nargs,
                                               bool countIncludesThis) {
  // The stack should already be aligned to the size of a value.
  assertStackAlignment(sizeof(Value), 0);

  static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
                "JitStackValueAlignment is either 1 or 2.");
  if (JitStackValueAlignment == 1) {
    return;
  }
  // A jit frame is composed of the following:
  //
  // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
  //                                    \________JitFrameLayout_________/
  // (The stack grows this way --->)
  //
  // We want to ensure that |raddr|, the return address, is 16-byte aligned.
  // (Note: if 8-byte alignment was sufficient, we would have already
  // returned above.)

  // JitFrameLayout does not affect the alignment, so we can ignore it.
  static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
                "JitFrameLayout doesn't affect stack alignment");

  // Therefore, we need to ensure that |this| is aligned.
  // This implies that |argN| must be aligned if N is even,
  // and offset by |sizeof(Value)| if N is odd.

  // Depending on the context of the caller, it may be easier to pass in a
  // register that has already been modified to include |this|. If that is the
  // case, we want to flip the direction of the test.
  Assembler::Condition condition =
      countIncludesThis ? Assembler::NonZero : Assembler::Zero;

  Label alignmentIsOffset, end;
  branchTestPtr(condition, nargs, Imm32(1), &alignmentIsOffset);

  // |argN| should be aligned to 16 bytes.
  andToStackPtr(Imm32(~(JitStackAlignment - 1)));
  jump(&end);

  // |argN| should be offset by 8 bytes from 16-byte alignment.
  // We already know that it is 8-byte aligned, so the only possibilities are:
  // a) It is 16-byte aligned, and we must offset it by 8 bytes.
  // b) It is not 16-byte aligned, and therefore already has the right offset.
  // Therefore, we test to see if it is 16-byte aligned, and adjust it if it is.
  bind(&alignmentIsOffset);
  branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
  subFromStackPtr(Imm32(sizeof(Value)));

  bind(&end);
}
2545
// Static-count variant of alignJitStackBasedOnNArgs: |argc| is known at
// compile time, so the parity test is resolved here instead of emitted.
void MacroAssembler::alignJitStackBasedOnNArgs(uint32_t argc) {
  // The stack should already be aligned to the size of a value.
  assertStackAlignment(sizeof(Value), 0);

  static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
                "JitStackValueAlignment is either 1 or 2.");
  if (JitStackValueAlignment == 1) {
    return;
  }

  // See above for full explanation.
  // nArgs counts |this| in addition to the actual arguments.
  uint32_t nArgs = argc + 1;
  if (nArgs % 2 == 0) {
    // |argN| should be 16-byte aligned
    andToStackPtr(Imm32(~(JitStackAlignment - 1)));
  } else {
    // |argN| must be 16-byte aligned if argc is even,
    // and offset by 8 if argc is odd.
    Label end;
    branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
    subFromStackPtr(Imm32(sizeof(Value)));
    bind(&end);
    assertStackAlignment(JitStackAlignment, sizeof(Value));
  }
}
2571
2572 // ===============================================================
2573
// Construct a MacroAssembler with its own JitContext and allocator for
// code generated outside an existing JIT compilation (e.g. trampolines).
MacroAssembler::MacroAssembler(JSContext* cx)
    : framePushed_(0),
#ifdef DEBUG
      inCall_(false),
#endif
      dynamicAlignment_(false),
      emitProfilingInstrumentation_(false) {
  // Install a fresh JitContext with no TempAllocator; alloc_ then supplies
  // one scoped to this assembler's lifetime.
  jitContext_.emplace(cx, (js::jit::TempAllocator*)nullptr);
  alloc_.emplace(cx);
  moveResolver_.setAllocator(*jitContext_->temp);
#if defined(JS_CODEGEN_ARM)
  initWithAllocator();
  m_buffer.id = GetJitContext()->getNextAssemblerId();
#elif defined(JS_CODEGEN_ARM64)
  initWithAllocator();
  armbuffer_.id = GetJitContext()->getNextAssemblerId();
#endif
}
2592
// Construct a MacroAssembler that attaches to the ambient JitContext,
// creating a temporary allocator only if the context does not have one.
MacroAssembler::MacroAssembler()
    : framePushed_(0),
#ifdef DEBUG
      inCall_(false),
#endif
      dynamicAlignment_(false),
      emitProfilingInstrumentation_(false) {
  JitContext* jcx = GetJitContext();

  if (!jcx->temp) {
    JSContext* cx = jcx->cx;
    MOZ_ASSERT(cx);
    // NOTE(review): presumably emplacing alloc_ installs a TempAllocator
    // into jcx->temp, which the next line dereferences — confirm against
    // the AutoJitContextAlloc definition.
    alloc_.emplace(cx);
  }

  moveResolver_.setAllocator(*jcx->temp);

#if defined(JS_CODEGEN_ARM)
  initWithAllocator();
  m_buffer.id = jcx->getNextAssemblerId();
#elif defined(JS_CODEGEN_ARM64)
  initWithAllocator();
  armbuffer_.id = jcx->getNextAssemblerId();
#endif
}
2618
// Construct a MacroAssembler for wasm compilation: no JitContext is
// required and the caller supplies the TempAllocator.
MacroAssembler::MacroAssembler(WasmToken, TempAllocator& alloc)
    : framePushed_(0),
#ifdef DEBUG
      inCall_(false),
#endif
      dynamicAlignment_(false),
      emitProfilingInstrumentation_(false) {
  moveResolver_.setAllocator(alloc);

#if defined(JS_CODEGEN_ARM)
  initWithAllocator();
  m_buffer.id = 0;
#elif defined(JS_CODEGEN_ARM64)
  initWithAllocator();
  // Stubs + builtins + the baseline compiler all require the native SP,
  // not the PSP.
  SetStackPointer64(sp);
  armbuffer_.id = 0;
#endif
}
2639
// IC variant of buildOOLFakeExitFrame. The |save| argument is unused here;
// it exists so callers holding an AutoSaveLiveRegisters can prove the live
// registers are preserved around the fake exit frame.
bool MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr,
                                             AutoSaveLiveRegisters& save) {
  return buildOOLFakeExitFrame(fakeReturnAddr);
}
2644
#ifndef JS_CODEGEN_ARM64
// Subtract |reg| from the stack pointer. ARM64 provides its own definition
// because of its pseudo-stack-pointer handling.
void MacroAssembler::subFromStackPtr(Register reg) {
  subPtr(reg, getStackPointer());
}
#endif  // JS_CODEGEN_ARM64
2650
2651 //{{{ check_macroassembler_style
2652 // ===============================================================
2653 // Stack manipulation functions.
2654
// Push a set of general-purpose registers (no float registers).
void MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set) {
  PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}
2658
// Pop every register in |set|, ignoring none.
void MacroAssembler::PopRegsInMask(LiveRegisterSet set) {
  PopRegsInMaskIgnore(set, LiveRegisterSet());
}
2662
// Pop a set of general-purpose registers (no float registers).
void MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set) {
  PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}
2666
// Push a jsid. GC-thing ids are pushed via ImmGCPtr so the GC can trace the
// embedded pointer; |scratchReg| is only clobbered for symbol ids, where the
// type tag must be re-applied at runtime.
void MacroAssembler::Push(jsid id, Register scratchReg) {
  if (id.isGCThing()) {
    // If we're pushing a gcthing, then we can't just push the tagged jsid
    // value since the GC won't have any idea that the push instruction
    // carries a reference to a gcthing.  Need to unpack the pointer,
    // push it using ImmGCPtr, and then rematerialize the id at runtime.

    if (JSID_IS_STRING(id)) {
      JSString* str = JSID_TO_STRING(id);
      MOZ_ASSERT(((size_t)str & JSID_TYPE_MASK) == 0);
      static_assert(JSID_TYPE_STRING == 0,
                    "need to orPtr JSID_TYPE_STRING tag if it's not 0");
      Push(ImmGCPtr(str));
    } else {
      MOZ_ASSERT(JSID_IS_SYMBOL(id));
      JS::Symbol* sym = JSID_TO_SYMBOL(id);
      movePtr(ImmGCPtr(sym), scratchReg);
      orPtr(Imm32(JSID_TYPE_SYMBOL), scratchReg);
      Push(scratchReg);
    }
  } else {
    // Non-GC ids (ints, void) are plain bits and can be pushed directly.
    Push(ImmWord(JSID_BITS(id)));
  }
}
2691
// Push |v| as a boxed Value, whether it is already a value register, a
// floating-point register (float32 is widened to double first), or a typed
// GPR that needs a tag.
void MacroAssembler::Push(TypedOrValueRegister v) {
  if (v.hasValue()) {
    Push(v.valueReg());
  } else if (IsFloatingPointType(v.type())) {
    FloatRegister reg = v.typedReg().fpu();
    if (v.type() == MIRType::Float32) {
      // Values store doubles, so widen float32 through a scratch register.
      ScratchDoubleScope fpscratch(*this);
      convertFloat32ToDouble(reg, fpscratch);
      PushBoxed(fpscratch);
    } else {
      PushBoxed(reg);
    }
  } else {
    Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
  }
}
2708
Push(const ConstantOrRegister & v)2709 void MacroAssembler::Push(const ConstantOrRegister& v) {
2710 if (v.constant()) {
2711 Push(v.value());
2712 } else {
2713 Push(v.reg());
2714 }
2715 }
2716
// Push the pointer-sized word stored at |addr|, tracking frame depth.
void MacroAssembler::Push(const Address& addr) {
  push(addr);
  framePushed_ += sizeof(uintptr_t);
}
2721
// Push a boxed Value held in |val|, tracking frame depth.
void MacroAssembler::Push(const ValueOperand& val) {
  pushValue(val);
  framePushed_ += sizeof(Value);
}
2726
// Push a constant Value, tracking frame depth.
void MacroAssembler::Push(const Value& val) {
  pushValue(val);
  framePushed_ += sizeof(Value);
}
2731
// Push the payload in |reg| boxed with tag |type|, tracking frame depth.
void MacroAssembler::Push(JSValueType type, Register reg) {
  pushValue(type, reg);
  framePushed_ += sizeof(Value);
}
2736
// Push a 64-bit value: one push on 64-bit targets, high word then low word
// on 32-bit little-endian targets.
void MacroAssembler::Push(const Register64 reg) {
#if JS_BITS_PER_WORD == 64
  Push(reg.reg);
#else
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Big-endian not supported.");
  Push(reg.high);
  Push(reg.low);
#endif
}
2746
// Push the Value stored at |addr|, tracking frame depth. The address must
// not be stack-pointer-relative, since the push itself moves the stack
// pointer.
void MacroAssembler::PushValue(const Address& addr) {
  MOZ_ASSERT(addr.base != getStackPointer());
  pushValue(addr);
  framePushed_ += sizeof(Value);
}
2752
// Reserve and zero-initialize a stack slot for a rooted VM-call argument of
// the given root type: nullptr for cell-like roots, UndefinedValue for value
// roots, JSID_VOID for id roots.
void MacroAssembler::PushEmptyRooted(VMFunctionData::RootType rootType) {
  switch (rootType) {
    case VMFunctionData::RootNone:
      MOZ_CRASH("Handle must have root type");
    case VMFunctionData::RootObject:
    case VMFunctionData::RootString:
    case VMFunctionData::RootFunction:
    case VMFunctionData::RootCell:
    case VMFunctionData::RootBigInt:
      Push(ImmPtr(nullptr));
      break;
    case VMFunctionData::RootValue:
      Push(UndefinedValue());
      break;
    case VMFunctionData::RootId:
      Push(ImmWord(JSID_BITS(JSID_VOID)));
      break;
  }
}
2772
// Pop a rooted slot pushed by PushEmptyRooted, into |cellReg| for
// pointer-sized roots or |valueReg| for value roots.
void MacroAssembler::popRooted(VMFunctionData::RootType rootType,
                               Register cellReg, const ValueOperand& valueReg) {
  switch (rootType) {
    case VMFunctionData::RootNone:
      MOZ_CRASH("Handle must have root type");
    case VMFunctionData::RootObject:
    case VMFunctionData::RootString:
    case VMFunctionData::RootFunction:
    case VMFunctionData::RootCell:
    case VMFunctionData::RootId:
    case VMFunctionData::RootBigInt:
      Pop(cellReg);
      break;
    case VMFunctionData::RootValue:
      Pop(valueReg);
      break;
  }
}
2791
adjustStack(int amount)2792 void MacroAssembler::adjustStack(int amount) {
2793 if (amount > 0) {
2794 freeStack(amount);
2795 } else if (amount < 0) {
2796 reserveStack(-amount);
2797 }
2798 }
2799
// Release |amount| bytes of reserved stack and update the tracked frame
// depth. Emitting the add is skipped when the amount is zero.
void MacroAssembler::freeStack(uint32_t amount) {
  MOZ_ASSERT(amount <= framePushed_);
  if (amount) {
    addToStackPtr(Imm32(amount));
  }
  framePushed_ -= amount;
}
2807
// Release a dynamic number of stack bytes; framePushed_ is deliberately not
// updated since the amount is only known at runtime.
void MacroAssembler::freeStack(Register amount) { addToStackPtr(amount); }
2809
2810 // ===============================================================
2811 // ABI function calls.
2812
// Shared initialization for an ABI call: reset per-call state and configure
// the ABI argument generator for the target platform.
void MacroAssembler::setupABICall() {
#ifdef DEBUG
  MOZ_ASSERT(!inCall_);
  inCall_ = true;
#endif

#ifdef JS_SIMULATOR
  signature_ = 0;
#endif

  // Reinitialize the ABIArg generator.
  abiArgs_ = ABIArgGenerator();

#if defined(JS_CODEGEN_ARM)
  // On ARM, we need to know what ABI we are using, either in the
  // simulator, or based on the configure flags.
#  if defined(JS_SIMULATOR_ARM)
  abiArgs_.setUseHardFp(UseHardFpABI());
#  elif defined(JS_CODEGEN_ARM_HARDFP)
  abiArgs_.setUseHardFp(true);
#  else
  abiArgs_.setUseHardFp(false);
#  endif
#endif

#if defined(JS_CODEGEN_MIPS32)
  // On MIPS, the system ABI use general registers pairs to encode double
  // arguments, after one or 2 integer-like arguments. Unfortunately, the
  // Lowering phase is not capable to express it at the moment. So we enforce
  // the system ABI here.
  abiArgs_.enforceO32ABI();
#endif
}
2846
// Begin an ABI call from wasm code. Wasm frames are kept aligned, so no
// dynamic alignment is needed.
void MacroAssembler::setupWasmABICall() {
  MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
  setupABICall();

#if defined(JS_CODEGEN_ARM)
  // The builtin thunk does the FP -> GPR moving on soft-FP, so
  // use hard fp unconditionally.
  abiArgs_.setUseHardFp(true);
#endif
  dynamicAlignment_ = false;
}
2858
// Begin an ABI call from JIT code whose stack is already ABI-aligned, so no
// dynamic realignment is emitted. Unsupported on arm64.
void MacroAssembler::setupAlignedABICall() {
  MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
  setupABICall();
  dynamicAlignment_ = false;

#if defined(JS_CODEGEN_ARM64)
  MOZ_CRASH("Not supported on arm64");
#endif
}
2868
// Record one outgoing ABI argument: compute its destination (register or
// stack) from the ABI generator and queue the move for resolution at call
// time. A move is only queued when source and destination differ.
void MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type) {
  MOZ_ASSERT(inCall_);
  appendSignatureType(type);

  ABIArg arg;
  switch (type) {
    case MoveOp::FLOAT32:
      arg = abiArgs_.next(MIRType::Float32);
      break;
    case MoveOp::DOUBLE:
      arg = abiArgs_.next(MIRType::Double);
      break;
    case MoveOp::GENERAL:
      arg = abiArgs_.next(MIRType::Pointer);
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }

  MoveOperand to(*this, arg);
  if (from == to) {
    // Already in place; no move needed.
    return;
  }

  if (oom()) {
    return;
  }
  propagateOOM(moveResolver_.addMove(from, to, type));
}
2898
// Perform the actual ABI call to |fun| after arguments have been passed via
// passABIArg. In DEBUG builds with |check| enabled, sets a flag on the
// JSContext before the call and asserts afterwards that the callee cleared
// it via AutoUnsafeCallWithABI.
void MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result,
                                           CheckUnsafeCallWithABI check) {
  appendSignatureType(result);
#ifdef JS_SIMULATOR
  fun = Simulator::RedirectNativeFunction(fun, signature());
#endif

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);

#ifdef DEBUG
  if (check == CheckUnsafeCallWithABI::Check) {
    // ReturnReg is free before the call; borrow it to reach the JSContext.
    push(ReturnReg);
    loadJSContext(ReturnReg);
    Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
    store32(Imm32(1), flagAddr);
    pop(ReturnReg);
  }
#endif

  call(ImmPtr(fun));

  callWithABIPost(stackAdjust, result);

#ifdef DEBUG
  if (check == CheckUnsafeCallWithABI::Check) {
    Label ok;
    push(ReturnReg);
    loadJSContext(ReturnReg);
    Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
    branch32(Assembler::Equal, flagAddr, Imm32(0), &ok);
    assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
    bind(&ok);
    pop(ReturnReg);
  }
#endif
}
2936
// ABI call from wasm to a builtin thunk. Preserves WasmTlsReg around the
// call and returns the CodeOffset of the return address for stack maps.
CodeOffset MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode,
                                       wasm::SymbolicAddress imm,
                                       MoveOp::Type result) {
  MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm));

  // We clobber WasmTlsReg below in the loadWasmTlsRegFromFrame(), but Ion
  // assumes it is non-volatile, so preserve it manually.
  Push(WasmTlsReg);

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust, /* callFromWasm = */ true);

  // The TLS register is used in builtin thunks and must be set, by ABI:
  // reload it after passing arguments, which might have used it at spill
  // points when placing arguments.
  loadWasmTlsRegFromFrame();

  CodeOffset raOffset = call(
      wasm::CallSiteDesc(bytecode.offset(), wasm::CallSite::Symbolic), imm);

  callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);

  Pop(WasmTlsReg);

  return raOffset;
}
2963
// ABI call used by wasm debug code; the target must not require a builtin
// thunk, so no TLS preservation or reload is needed.
void MacroAssembler::callDebugWithABI(wasm::SymbolicAddress imm,
                                      MoveOp::Type result) {
  MOZ_ASSERT(!wasm::NeedsBuiltinThunk(imm));
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust, /* callFromWasm = */ false);
  call(imm);
  callWithABIPost(stackAdjust, result, /* callFromWasm = */ false);
}
2972
2973 // ===============================================================
2974 // Exit frame footer.
2975
// Record the current stack pointer as the activation's packed exit frame
// pointer, so the stack can be walked across the upcoming native call.
void MacroAssembler::linkExitFrame(Register cxreg, Register scratch) {
  loadPtr(Address(cxreg, JSContext::offsetOfActivation()), scratch);
  storeStackPtr(Address(scratch, JitActivation::offsetOfPackedExitFP()));
}
2980
2981 // ===============================================================
2982 // Simple value-shuffling helpers, to hide MoveResolver verbosity
2983 // in common cases.
2984
// Move two registers simultaneously via the MoveResolver, which handles the
// case where the pairs overlap (e.g. src1 == dst0). No-op moves are skipped.
void MacroAssembler::moveRegPair(Register src0, Register src1, Register dst0,
                                 Register dst1, MoveOp::Type type) {
  MoveResolver& moves = moveResolver();
  if (src0 != dst0) {
    propagateOOM(moves.addMove(MoveOperand(src0), MoveOperand(dst0), type));
  }
  if (src1 != dst1) {
    propagateOOM(moves.addMove(MoveOperand(src1), MoveOperand(dst1), type));
  }
  propagateOOM(moves.resolve());
  if (oom()) {
    return;
  }

  MoveEmitter emitter(*this);
  emitter.emit(moves);
  emitter.finish();
}
3003
3004 // ===============================================================
3005 // Arithmetic functions
3006
// Compute |dest = base ** power| for int32 operands using binary
// exponentiation (square-and-multiply), branching to |onOver| when the
// exponent is negative or any multiplication overflows int32. |temp1| holds
// the running square, |temp2| the remaining exponent bits.
void MacroAssembler::pow32(Register base, Register power, Register dest,
                           Register temp1, Register temp2, Label* onOver) {
  // Inline int32-specialized implementation of js::powi with overflow
  // detection.

  move32(Imm32(1), dest);  // p = 1

  // x^y where x == 1 returns 1 for any y.
  Label done;
  branch32(Assembler::Equal, base, Imm32(1), &done);

  move32(base, temp1);   // m = x
  move32(power, temp2);  // n = y

  // x^y where y < 0 returns a non-int32 value for any x != 1. Except when y is
  // large enough so that the result is no longer representable as a double with
  // fractional parts. We can't easily determine when y is too large, so we bail
  // here.
  Label start;
  branchTest32(Assembler::NotSigned, power, power, &start);
  jump(onOver);

  Label loop;
  bind(&loop);

  // m *= m
  branchMul32(Assembler::Overflow, temp1, temp1, onOver);

  // Loop entry point: the first squaring is skipped since m already holds x.
  bind(&start);

  // if ((n & 1) != 0) p *= m
  Label even;
  branchTest32(Assembler::Zero, temp2, Imm32(1), &even);
  branchMul32(Assembler::Overflow, temp1, dest, onOver);
  bind(&even);

  // n >>= 1
  // if (n == 0) return p
  branchRshift32(Assembler::NonZero, Imm32(1), temp2, &loop);

  bind(&done);
}
3049
3050 // ===============================================================
3051 // Branch functions
3052
// Load a JSFunction's |length| into |output|, dispatching on the function
// kind: native (nargs), bound (extended slot, must be an int32), or
// interpreted (immutable script data). Jumps to |slowPath| when the length
// cannot be loaded inline. |funFlags| and |output| may alias.
void MacroAssembler::loadFunctionLength(Register func, Register funFlags,
                                        Register output, Label* slowPath) {
#ifdef DEBUG
  {
    // These flags should already have been checked by caller.
    Label ok;
    uint32_t FlagsToCheck =
        FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH;
    branchTest32(Assembler::Zero, funFlags, Imm32(FlagsToCheck), &ok);
    assumeUnreachable("The function flags should already have been checked.");
    bind(&ok);
  }
#endif  // DEBUG

  // NOTE: `funFlags` and `output` must be allowed to alias.

  // Load the target function's length.
  Label isInterpreted, isBound, lengthLoaded;
  branchTest32(Assembler::NonZero, funFlags, Imm32(FunctionFlags::BOUND_FUN),
               &isBound);
  branchTest32(Assembler::NonZero, funFlags, Imm32(FunctionFlags::BASESCRIPT),
               &isInterpreted);
  {
    // Load the length property of a native function.
    load16ZeroExtend(Address(func, JSFunction::offsetOfNargs()), output);
    jump(&lengthLoaded);
  }
  bind(&isBound);
  {
    // Load the length property of a bound function.
    Address boundLength(
        func, FunctionExtended::offsetOfExtendedSlot(BOUND_FUN_LENGTH_SLOT));
    branchTestInt32(Assembler::NotEqual, boundLength, slowPath);
    unboxInt32(boundLength, output);
    jump(&lengthLoaded);
  }
  bind(&isInterpreted);
  {
    // Load the length property of an interpreted function. Bail to the slow
    // path if the script has no shared data yet.
    loadPtr(Address(func, JSFunction::offsetOfScript()), output);
    loadPtr(Address(output, JSScript::offsetOfSharedData()), output);
    branchTestPtr(Assembler::Zero, output, output, slowPath);
    loadPtr(Address(output, RuntimeScriptData::offsetOfISD()), output);
    load16ZeroExtend(Address(output, ImmutableScriptData::offsetOfFunLength()),
                     output);
  }
  bind(&lengthLoaded);
}
3101
// Branch to |label| if |obj|'s group compares |cond| against the group
// stored at |group|, without any Spectre mitigation.
void MacroAssembler::branchTestObjGroupNoSpectreMitigations(
    Condition cond, Register obj, const Address& group, Register scratch,
    Label* label) {
  // Note: obj and scratch registers may alias.
  MOZ_ASSERT(group.base != scratch);
  MOZ_ASSERT(group.base != obj);

  loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
  branchPtr(cond, group, scratch, label);
}
3112
// Branch to |label| if |obj|'s group compares |cond| against the group at
// |group|. When Spectre mitigations are enabled, |spectreRegToZero| is
// zeroed on the fall-through path so a mispredicted branch cannot leak it.
void MacroAssembler::branchTestObjGroup(Condition cond, Register obj,
                                        const Address& group, Register scratch,
                                        Register spectreRegToZero,
                                        Label* label) {
  // Note: obj and scratch registers may alias.
  MOZ_ASSERT(group.base != scratch);
  MOZ_ASSERT(group.base != obj);
  MOZ_ASSERT(scratch != spectreRegToZero);

  loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
  branchPtr(cond, group, scratch, label);

  if (JitOptions.spectreObjectMitigationsMisc) {
    spectreZeroRegister(cond, scratch, spectreRegToZero);
  }
}
3129
// Branch to |label| if |obj|'s compartment (reached via group -> realm ->
// compartment) compares |cond| against the compartment stored at
// |compartment|.
void MacroAssembler::branchTestObjCompartment(Condition cond, Register obj,
                                              const Address& compartment,
                                              Register scratch, Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
  loadPtr(Address(scratch, ObjectGroup::offsetOfRealm()), scratch);
  loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
  branchPtr(cond, compartment, scratch, label);
}
3139
// As above, but compares against an immediate compartment pointer.
void MacroAssembler::branchTestObjCompartment(
    Condition cond, Register obj, const JS::Compartment* compartment,
    Register scratch, Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
  loadPtr(Address(scratch, ObjectGroup::offsetOfRealm()), scratch);
  loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
  branchPtr(cond, scratch, ImmPtr(compartment), label);
}
3149
// Branch to |label| if |obj|'s group has a null addendum pointer.
void MacroAssembler::branchIfObjGroupHasNoAddendum(Register obj,
                                                   Register scratch,
                                                   Label* label) {
  MOZ_ASSERT(obj != scratch);
  loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
  branchPtr(Assembler::Equal, Address(scratch, ObjectGroup::offsetOfAddendum()),
            ImmWord(0), label);
}
3158
// Immediate-group variant: materialize |group| into |scratch| and delegate
// to the register-based overload below.
void MacroAssembler::branchIfPretenuredGroup(const ObjectGroup* group,
                                             Register scratch, Label* label) {
  movePtr(ImmGCPtr(group), scratch);
  branchIfPretenuredGroup(scratch, label);
}
3164
// Branch to |label| if the group in |group| is marked pre-tenured.
void MacroAssembler::branchIfPretenuredGroup(Register group, Label* label) {
  // To check for the pretenured flag we need OBJECT_FLAG_PRE_TENURE set, and
  // OBJECT_FLAG_UNKNOWN_PROPERTIES unset, so check the latter first, and
  // don't branch if it is set.
  Label unknownProperties;
  branchTest32(Assembler::NonZero, Address(group, ObjectGroup::offsetOfFlags()),
               Imm32(OBJECT_FLAG_UNKNOWN_PROPERTIES), &unknownProperties);
  branchTest32(Assembler::NonZero, Address(group, ObjectGroup::offsetOfFlags()),
               Imm32(OBJECT_FLAG_PRE_TENURE), label);
  bind(&unknownProperties);
}
3176
// Branch to |label| if |obj|'s class has the NON_NATIVE flag. Clobbers
// |scratch| with the class pointer.
void MacroAssembler::branchIfNonNativeObj(Register obj, Register scratch,
                                          Label* label) {
  loadObjClassUnsafe(obj, scratch);
  branchTest32(Assembler::NonZero, Address(scratch, JSClass::offsetOfFlags()),
               Imm32(JSClass::NON_NATIVE), label);
}
3183
// Branch to |label| if |obj| is an inline typed object (either the opaque or
// the transparent class). Clobbers |scratch| with the class pointer.
void MacroAssembler::branchIfInlineTypedObject(Register obj, Register scratch,
                                               Label* label) {
  loadObjClassUnsafe(obj, scratch);
  branchPtr(Assembler::Equal, scratch, ImmPtr(&InlineOpaqueTypedObject::class_),
            label);
  branchPtr(Assembler::Equal, scratch,
            ImmPtr(&InlineTransparentTypedObject::class_), label);
}
3192
// Copy |sourceObj|'s group pointer into |destObj| without emitting a
// pre-barrier; the caller is responsible for any required barriers.
void MacroAssembler::copyObjGroupNoPreBarrier(Register sourceObj,
                                              Register destObj,
                                              Register scratch) {
  loadPtr(Address(sourceObj, JSObject::offsetOfGroup()), scratch);
  storePtr(scratch, Address(destObj, JSObject::offsetOfGroup()));
}
3199
// Load a typed object's descriptor (stored as the group's addendum) into
// |dest|.
void MacroAssembler::loadTypedObjectDescr(Register obj, Register dest) {
  loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
  loadPtr(Address(dest, ObjectGroup::offsetOfAddendum()), dest);
}
3204
// Load the array length from a typed object's descriptor into |dest|.
void MacroAssembler::loadTypedObjectLength(Register obj, Register dest) {
  loadTypedObjectDescr(obj, dest);
  unboxInt32(Address(dest, ArrayTypeDescr::offsetOfLength()), dest);
}
3209
// Emit a tag test for |type| against |tag| and branch to |label| on match —
// but only when |maybeDef| (if provided) says the value might actually have
// that type, so statically impossible tests are skipped.
void MacroAssembler::maybeBranchTestType(MIRType type, MDefinition* maybeDef,
                                         Register tag, Label* label) {
  if (!maybeDef || maybeDef->mightBeType(type)) {
    switch (type) {
      case MIRType::Null:
        branchTestNull(Equal, tag, label);
        break;
      case MIRType::Boolean:
        branchTestBoolean(Equal, tag, label);
        break;
      case MIRType::Int32:
        branchTestInt32(Equal, tag, label);
        break;
      case MIRType::Double:
        branchTestDouble(Equal, tag, label);
        break;
      case MIRType::String:
        branchTestString(Equal, tag, label);
        break;
      case MIRType::Symbol:
        branchTestSymbol(Equal, tag, label);
        break;
      case MIRType::BigInt:
        branchTestBigInt(Equal, tag, label);
        break;
      case MIRType::Object:
        branchTestObject(Equal, tag, label);
        break;
      default:
        MOZ_CRASH("Unsupported type");
    }
  }
}
3243
// Emit a wasm trap instruction and register its offset (with the bytecode
// location) in the trap site table for the signal handler.
void MacroAssembler::wasmTrap(wasm::Trap trap,
                              wasm::BytecodeOffset bytecodeOffset) {
  uint32_t trapOffset = wasmTrapInstruction().offset();
  // The trap machinery assumes a fixed-size trap instruction.
  MOZ_ASSERT_IF(!oom(),
                currentOffset() - trapOffset == WasmTrapInstructionLength);

  append(trap, wasm::TrapSite(trapOffset, bytecodeOffset));
}
3252
// Poll the TLS interrupt flag and trap with CheckInterrupt when it is set.
void MacroAssembler::wasmInterruptCheck(Register tls,
                                        wasm::BytecodeOffset bytecodeOffset) {
  Label ok;
  branch32(Assembler::Equal, Address(tls, offsetof(wasm::TlsData, interrupt)),
           Imm32(0), &ok);
  wasmTrap(wasm::Trap::CheckInterrupt, bytecodeOffset);
  bind(&ok);
}
3261
// Reserve |amount| bytes of stack with an overflow check against the TLS
// stack limit, trapping with StackOverflow on failure. Returns the offset of
// the trap instruction and the number of bytes the caller may treat as
// already committed (0 for large frames, where sp is only bumped after the
// check succeeds).
std::pair<CodeOffset, uint32_t> MacroAssembler::wasmReserveStackChecked(
    uint32_t amount, wasm::BytecodeOffset trapOffset) {
  if (amount > MAX_UNCHECKED_LEAF_FRAME_SIZE) {
    // The frame is large.  Don't bump sp until after the stack limit check so
    // that the trap handler isn't called with a wild sp.
    Label ok;
    Register scratch = ABINonArgReg0;
    moveStackPtrTo(scratch);

    // Also trap if sp - amount would wrap below zero.
    Label trap;
    branchPtr(Assembler::Below, scratch, Imm32(amount), &trap);
    subPtr(Imm32(amount), scratch);
    branchPtr(Assembler::Below,
              Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)), scratch,
              &ok);

    bind(&trap);
    wasmTrap(wasm::Trap::StackOverflow, trapOffset);
    CodeOffset trapInsnOffset = CodeOffset(currentOffset());

    bind(&ok);
    reserveStack(amount);
    return std::pair<CodeOffset, uint32_t>(trapInsnOffset, 0);
  }

  // Small frame: bump sp first, then check; the guard page absorbs any
  // transiently wild sp.
  reserveStack(amount);
  Label ok;
  branchStackPtrRhs(Assembler::Below,
                    Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)),
                    &ok);
  wasmTrap(wasm::Trap::StackOverflow, trapOffset);
  CodeOffset trapInsnOffset = CodeOffset(currentOffset());
  bind(&ok);
  return std::pair<CodeOffset, uint32_t>(trapInsnOffset, amount);
}
3297
// Call an imported wasm function: load the callee code pointer from the
// import's global-data entry, switch to the callee's realm and TLS/pinned
// registers, and make the call. Returns the call's CodeOffset.
CodeOffset MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc,
                                          const wasm::CalleeDesc& callee) {
  // Load the callee, before the caller's registers are clobbered.
  uint32_t globalDataOffset = callee.importGlobalDataOffset();
  loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, code),
                    ABINonArgReg0);

#ifndef JS_CODEGEN_NONE
  static_assert(ABINonArgReg0 != WasmTlsReg, "by constraint");
#endif

  // Switch to the callee's realm.
  loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, realm),
                    ABINonArgReg1);
  loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), ABINonArgReg2);
  storePtr(ABINonArgReg1, Address(ABINonArgReg2, JSContext::offsetOfRealm()));

  // Switch to the callee's TLS and pinned registers and make the call.
  loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, tls),
                    WasmTlsReg);
  loadWasmPinnedRegsFromTls();

  return call(desc, ABINonArgReg0);
}
3322
// Call a builtin instance method: the Instance* (read from TLS) is passed in
// |instanceArg|, then the builtin is called. Depending on |failureMode|, the
// return value is checked afterwards and a ThrowReported trap is emitted on
// failure (negative i32, null pointer, or invalid ref).
CodeOffset MacroAssembler::wasmCallBuiltinInstanceMethod(
    const wasm::CallSiteDesc& desc, const ABIArg& instanceArg,
    wasm::SymbolicAddress builtin, wasm::FailureMode failureMode) {
  MOZ_ASSERT(instanceArg != ABIArg());

  if (instanceArg.kind() == ABIArg::GPR) {
    loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)),
            instanceArg.gpr());
  } else if (instanceArg.kind() == ABIArg::Stack) {
    // Safe to use ABINonArgReg0 since it's the last thing before the call.
    Register scratch = ABINonArgReg0;
    loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), scratch);
    storePtr(scratch,
             Address(getStackPointer(), instanceArg.offsetFromArgBase()));
  } else {
    MOZ_CRASH("Unknown abi passing style for pointer");
  }

  CodeOffset ret = call(desc, builtin);

  if (failureMode != wasm::FailureMode::Infallible) {
    Label noTrap;
    switch (failureMode) {
      case wasm::FailureMode::Infallible:
        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE();
      case wasm::FailureMode::FailOnNegI32:
        branchTest32(Assembler::NotSigned, ReturnReg, ReturnReg, &noTrap);
        break;
      case wasm::FailureMode::FailOnNullPtr:
        branchTestPtr(Assembler::NonZero, ReturnReg, ReturnReg, &noTrap);
        break;
      case wasm::FailureMode::FailOnInvalidRef:
        branchPtr(Assembler::NotEqual, ReturnReg,
                  ImmWord(uintptr_t(wasm::AnyRef::invalid().forCompiledCode())),
                  &noTrap);
        break;
    }
    wasmTrap(wasm::Trap::ThrowReported,
             wasm::BytecodeOffset(desc.lineOrBytecode()));
    bind(&noTrap);
  }

  return ret;
}
3367
// Emit an indirect call through a function table. asm.js tables skip both
// the bounds check and the signature check; wasm tables load/compare the
// functype id, optionally bounds-check the index, trap on a null entry, and
// switch to the callee's TLS, pinned registers, and realm before calling.
CodeOffset MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc,
                                            const wasm::CalleeDesc& callee,
                                            bool needsBoundsCheck) {
  Register scratch = WasmTableCallScratchReg0;
  Register index = WasmTableCallIndexReg;

  // Optimization opportunity: when offsetof(FunctionTableElem, code) == 0, as
  // it is at present, we can probably generate better code here by folding
  // the address computation into the load.

  static_assert(sizeof(wasm::FunctionTableElem) == 8 ||
                    sizeof(wasm::FunctionTableElem) == 16,
                "elements of function tables are two words");

  if (callee.which() == wasm::CalleeDesc::AsmJSTable) {
    // asm.js tables require no signature check, and have had their index
    // masked into range and thus need no bounds check.
    loadWasmGlobalPtr(callee.tableFunctionBaseGlobalDataOffset(), scratch);
    if (sizeof(wasm::FunctionTableElem) == 8) {
      computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
    } else {
      // 16-byte elements: scale the index by shifting, then add.
      lshift32(Imm32(4), index);
      addPtr(index, scratch);
    }
    loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, code)), scratch);
    return call(desc, scratch);
  }

  MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);

  // Write the functype-id into the ABI functype-id register.
  wasm::FuncTypeIdDesc funcTypeId = callee.wasmTableSigId();
  switch (funcTypeId.kind()) {
    case wasm::FuncTypeIdDescKind::Global:
      loadWasmGlobalPtr(funcTypeId.globalDataOffset(), WasmTableCallSigReg);
      break;
    case wasm::FuncTypeIdDescKind::Immediate:
      move32(Imm32(funcTypeId.immediate()), WasmTableCallSigReg);
      break;
    case wasm::FuncTypeIdDescKind::None:
      break;
  }

  wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());

  // WebAssembly throws if the index is out-of-bounds.
  if (needsBoundsCheck) {
    loadWasmGlobalPtr(callee.tableLengthGlobalDataOffset(), scratch);

    Label ok;
    branch32(Assembler::Condition::Below, index, scratch, &ok);
    wasmTrap(wasm::Trap::OutOfBounds, trapOffset);
    bind(&ok);
  }

  // Load the base pointer of the table.
  loadWasmGlobalPtr(callee.tableFunctionBaseGlobalDataOffset(), scratch);

  // Load the callee from the table.
  if (sizeof(wasm::FunctionTableElem) == 8) {
    computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
  } else {
    lshift32(Imm32(4), index);
    addPtr(index, scratch);
  }

  loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, tls)), WasmTlsReg);

  // A null TLS entry means the table slot is uninitialized.
  Label nonNull;
  branchTest32(Assembler::NonZero, WasmTlsReg, WasmTlsReg, &nonNull);
  wasmTrap(wasm::Trap::IndirectCallToNull, trapOffset);
  bind(&nonNull);

  loadWasmPinnedRegsFromTls();
  switchToWasmTlsRealm(index, WasmTableCallScratchReg1);

  loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, code)), scratch);

  return call(desc, scratch);
}
3448
nopPatchableToCall(const wasm::CallSiteDesc & desc)3449 void MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc) {
3450 CodeOffset offset = nopPatchableToCall();
3451 append(desc, offset);
3452 }
3453
// Emit the fast path of a GC pre-write barrier. PreBarrierReg holds the
// address of the slot about to be overwritten; the GC thing it currently
// refers to is loaded and tested, and control jumps to |noBarrier| when no
// barrier is required (the thing is in the nursery, belongs to a different
// runtime, or its black mark bit is already set). Falls through when the
// barrier must be taken.
//
// |type| selects how the slot is read (boxed Value vs. raw pointer).
// temp1-temp3 are clobbered; none of them may alias PreBarrierReg.
void MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type,
                                            Register temp1, Register temp2,
                                            Register temp3, Label* noBarrier) {
  MOZ_ASSERT(temp1 != PreBarrierReg);
  MOZ_ASSERT(temp2 != PreBarrierReg);
  MOZ_ASSERT(temp3 != PreBarrierReg);

  // Load the GC thing in temp1.
  if (type == MIRType::Value) {
    unboxGCThingForGCBarrier(Address(PreBarrierReg, 0), temp1);
  } else {
    MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
               type == MIRType::Shape || type == MIRType::ObjectGroup);
    loadPtr(Address(PreBarrierReg, 0), temp1);
  }

#ifdef DEBUG
  // The caller should have checked for null pointers.
  Label nonZero;
  branchTestPtr(Assembler::NonZero, temp1, temp1, &nonZero);
  assumeUnreachable("JIT pre-barrier: unexpected nullptr");
  bind(&nonZero);
#endif

  // Load the chunk address in temp2 by masking off the within-chunk bits.
  movePtr(ImmWord(~gc::ChunkMask), temp2);
  andPtr(temp1, temp2);

  // If the GC thing is in the nursery, we don't need to barrier it.
  if (type == MIRType::Value || type == MIRType::Object ||
      type == MIRType::String) {
    branch32(Assembler::Equal, Address(temp2, gc::ChunkLocationOffset),
             Imm32(int32_t(gc::ChunkLocation::Nursery)), noBarrier);
  } else {
#ifdef DEBUG
    // The remaining types are expected never to be nursery-allocated, so in
    // debug builds we assert rather than emit the location test.
    Label isTenured;
    branch32(Assembler::NotEqual, Address(temp2, gc::ChunkLocationOffset),
             Imm32(int32_t(gc::ChunkLocation::Nursery)), &isTenured);
    assumeUnreachable("JIT pre-barrier: unexpected nursery pointer");
    bind(&isTenured);
#endif
  }

  // If it's a permanent atom or symbol from a parent runtime we don't
  // need to barrier it.
  if (type == MIRType::Value || type == MIRType::String) {
    branchPtr(Assembler::NotEqual, Address(temp2, gc::ChunkRuntimeOffset),
              ImmPtr(rt), noBarrier);
  } else {
#ifdef DEBUG
    // Other types are expected to always belong to this runtime; assert
    // instead of testing.
    Label thisRuntime;
    branchPtr(Assembler::Equal, Address(temp2, gc::ChunkRuntimeOffset),
              ImmPtr(rt), &thisRuntime);
    assumeUnreachable("JIT pre-barrier: unexpected runtime");
    bind(&thisRuntime);
#endif
  }

  // Determine the bit index and store in temp1.
  //
  // bit = (addr & js::gc::ChunkMask) / js::gc::CellBytesPerMarkBit +
  //        static_cast<uint32_t>(colorBit);
  static_assert(gc::CellBytesPerMarkBit == 8,
                "Calculation below relies on this");
  static_assert(size_t(gc::ColorBit::BlackBit) == 0,
                "Calculation below relies on this");
  andPtr(Imm32(gc::ChunkMask), temp1);
  rshiftPtr(Imm32(3), temp1);

  static const size_t nbits = sizeof(uintptr_t) * CHAR_BIT;
  static_assert(nbits == JS_BITS_PER_WORD, "Calculation below relies on this");

  // Load the bitmap word in temp2.
  //
  // word = chunk.bitmap[bit / nbits];
  movePtr(temp1, temp3);
#if JS_BITS_PER_WORD == 64
  rshiftPtr(Imm32(6), temp1);
  loadPtr(BaseIndex(temp2, temp1, TimesEight, gc::ChunkMarkBitmapOffset),
          temp2);
#else
  rshiftPtr(Imm32(5), temp1);
  loadPtr(BaseIndex(temp2, temp1, TimesFour, gc::ChunkMarkBitmapOffset), temp2);
#endif

  // Load the mask in temp1.
  //
  // mask = uintptr_t(1) << (bit % nbits);
  // The shift count lives in temp3; the variable shift must be emitted
  // per-architecture below.
  andPtr(Imm32(nbits - 1), temp3);
  move32(Imm32(1), temp1);
#ifdef JS_CODEGEN_X64
  MOZ_ASSERT(temp3 == rcx);
  shlq_cl(temp1);
#elif JS_CODEGEN_X86
  MOZ_ASSERT(temp3 == ecx);
  shll_cl(temp1);
#elif JS_CODEGEN_ARM
  ma_lsl(temp3, temp1, temp1);
#elif JS_CODEGEN_ARM64
  Lsl(ARMRegister(temp1, 64), ARMRegister(temp1, 64), ARMRegister(temp3, 64));
#elif JS_CODEGEN_MIPS32
  ma_sll(temp1, temp1, temp3);
#elif JS_CODEGEN_MIPS64
  ma_dsll(temp1, temp1, temp3);
#elif JS_CODEGEN_NONE
  MOZ_CRASH();
#else
#  error "Unknown architecture"
#endif

  // No barrier is needed if the bit is set, |word & mask != 0|.
  branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
}
3567
3568 // ========================================================================
3569 // Spectre Mitigations.
3570
// Spectre mitigation: conditionally select |index| into |output|. |output|
// becomes |index| when index < length (unsigned) and 0 otherwise, so a
// mis-speculated out-of-bounds index cannot reach a dependent memory access.
void MacroAssembler::spectreMaskIndex(Register index, Register length,
                                      Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(length != output);
  MOZ_ASSERT(index != output);

  // Start from zero and overwrite with the index only when it is in bounds.
  move32(Imm32(0), output);
  cmp32Move32(Assembler::Below, index, length, index, output);
}
3580
// As above, but with the length loaded from memory. |output| becomes |index|
// when index < *length (unsigned) and 0 otherwise.
void MacroAssembler::spectreMaskIndex(Register index, const Address& length,
                                      Register output) {
  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != output);
  MOZ_ASSERT(index != output);

  // Start from zero and overwrite with the index only when it is in bounds.
  move32(Imm32(0), output);
  cmp32Move32(Assembler::Below, index, length, index, output);
}
3591
// Branch to |failure| when index >= length (unsigned). |length| must be a
// power of two. With Spectre index masking enabled, the index register is
// additionally masked into [0, length) so speculative execution past the
// branch cannot observe an out-of-range value.
void MacroAssembler::boundsCheck32PowerOfTwo(Register index, uint32_t length,
                                             Label* failure) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(length));
  branch32(Assembler::AboveOrEqual, index, Imm32(length), failure);

  // Note: it's fine to clobber the input register, as this is a no-op: it
  // only affects speculative execution.
  if (JitOptions.spectreIndexMasking) {
    and32(Imm32(length - 1), index);
  }
}
3603
3604 //}}} check_macroassembler_style
3605
// Emit the memory barrier required before an access with the given
// synchronization requirements.
void MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
  memoryBarrier(sync.barrierBefore);
}
3609
// Emit the memory barrier required after an access with the given
// synchronization requirements.
void MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
  memoryBarrier(sync.barrierAfter);
}
3613
// Reload the wasm TLS pointer into |dest| from the current wasm::Frame's
// |tls| slot; framePushed() is the distance from the stack pointer back to
// the frame.
void MacroAssembler::loadWasmTlsRegFromFrame(Register dest) {
  loadPtr(
      Address(getStackPointer(), framePushed() + offsetof(wasm::Frame, tls)),
      dest);
}
3619
// Emit the deferred GC-pointer comparison branch this object recorded.
void MacroAssembler::BranchGCPtr::emit(MacroAssembler& masm) {
  MOZ_ASSERT(isInitialized());
  masm.branchPtr(cond(), reg(), ptr_, jump());
}
3624
// In debug builds, emit a check that crashes (via assumeUnreachable) when
// |val| is not an object value. No-op in release builds.
void MacroAssembler::debugAssertIsObject(const ValueOperand& val) {
#ifdef DEBUG
  Label ok;
  branchTestObject(Assembler::Equal, val, &ok);
  assumeUnreachable("Expected an object!");
  bind(&ok);
#endif
}
3633
// In debug builds, emit a check that crashes when |obj|'s shape reports no
// fixed slots. |scratch| is clobbered. No-op in release builds.
void MacroAssembler::debugAssertObjHasFixedSlots(Register obj,
                                                 Register scratch) {
#ifdef DEBUG
  Label hasFixedSlots;
  // Test the fixed-slot bits of the shape's immutable flags word.
  loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
  branchTest32(Assembler::NonZero,
               Address(scratch, Shape::offsetOfImmutableFlags()),
               Imm32(Shape::fixedSlotsMask()), &hasFixedSlots);
  assumeUnreachable("Expected a fixed slot");
  bind(&hasFixedSlots);
#endif
}
3646
// Branch to |notReusable| when the NativeIterator pointed to by |ni| has its
// NotReusable flag set and therefore cannot be reused for a new iteration.
void MacroAssembler::branchIfNativeIteratorNotReusable(Register ni,
                                                       Label* notReusable) {
  // See NativeIterator::isReusable.
  Address flagsAddr(ni, NativeIterator::offsetOfFlagsAndCount());

#ifdef DEBUG
  // Sanity-check that the iterator has finished initialization before its
  // flags are consulted.
  Label niIsInitialized;
  branchTest32(Assembler::NonZero, flagsAddr,
               Imm32(NativeIterator::Flags::Initialized), &niIsInitialized);
  assumeUnreachable(
      "Expected a NativeIterator that's been completely "
      "initialized");
  bind(&niIsInitialized);
#endif

  branchTest32(Assembler::NonZero, flagsAddr,
               Imm32(NativeIterator::Flags::NotReusable), notReusable);
}
3665
// Load the NativeIterator* stored in the PropertyIteratorObject |obj| into
// |dest|. In debug builds, first assert that |obj| really is a
// PropertyIteratorObject.
static void LoadNativeIterator(MacroAssembler& masm, Register obj,
                               Register dest) {
  MOZ_ASSERT(obj != dest);

#ifdef DEBUG
  // Assert we have a PropertyIteratorObject.
  Label ok;
  masm.branchTestObjClass(Assembler::Equal, obj,
                          &PropertyIteratorObject::class_, dest, obj, &ok);
  masm.assumeUnreachable("Expected PropertyIteratorObject!");
  masm.bind(&ok);
#endif

  // Load NativeIterator object.
  masm.loadObjPrivate(obj, PropertyIteratorObject::NUM_FIXED_SLOTS, dest);
}
3682
// Emit the |more| step of for-in iteration over |obj| (a
// PropertyIteratorObject). On exit |output| holds either the next property
// name as a string value (with the iterator's cursor advanced) or
// MagicValue(JS_NO_ITER_VALUE) when the property list is exhausted. |temp|
// and output's scratch register are clobbered.
void MacroAssembler::iteratorMore(Register obj, ValueOperand output,
                                  Register temp) {
  Label done;
  Register outputScratch = output.scratchReg();
  LoadNativeIterator(*this, obj, outputScratch);

  // If propertyCursor_ < propertiesEnd_, load the next string and advance
  // the cursor. Otherwise return MagicValue(JS_NO_ITER_VALUE).
  Label iterDone;
  Address cursorAddr(outputScratch, NativeIterator::offsetOfPropertyCursor());
  Address cursorEndAddr(outputScratch, NativeIterator::offsetOfPropertiesEnd());
  loadPtr(cursorAddr, temp);
  branchPtr(Assembler::BelowOrEqual, cursorEndAddr, temp, &iterDone);

  // Get next string.
  loadPtr(Address(temp, 0), temp);

  // Increase the cursor.
  addPtr(Imm32(sizeof(GCPtrLinearString)), cursorAddr);

  tagValue(JSVAL_TYPE_STRING, temp, output);
  jump(&done);

  bind(&iterDone);
  moveValue(MagicValue(JS_NO_ITER_VALUE), output);

  bind(&done);
}
3711
// Emit code to close a for-in iterator |obj|: clear the NativeIterator's
// Active flag, reset its property cursor, and unlink it from the
// doubly-linked list of active iterators. temp1-temp3 are clobbered.
void MacroAssembler::iteratorClose(Register obj, Register temp1, Register temp2,
                                   Register temp3) {
  LoadNativeIterator(*this, obj, temp1);

  // Clear active bit.
  and32(Imm32(~NativeIterator::Flags::Active),
        Address(temp1, NativeIterator::offsetOfFlagsAndCount()));

  // Reset property cursor.
  loadPtr(Address(temp1, NativeIterator::offsetOfGuardsEnd()), temp2);
  storePtr(temp2, Address(temp1, NativeIterator::offsetOfPropertyCursor()));

  // Unlink from the iterator list.
  const Register next = temp2;
  const Register prev = temp3;
  loadPtr(Address(temp1, NativeIterator::offsetOfNext()), next);
  loadPtr(Address(temp1, NativeIterator::offsetOfPrev()), prev);
  storePtr(prev, Address(next, NativeIterator::offsetOfPrev()));
  storePtr(next, Address(prev, NativeIterator::offsetOfNext()));
#ifdef DEBUG
  // Null out the unlinked iterator's links so any stale use is detectable.
  storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfNext()));
  storePtr(ImmPtr(nullptr), Address(temp1, NativeIterator::offsetOfPrev()));
#endif
}
3736
3737 template <typename T, size_t N, typename P>
AddPendingReadBarrier(Vector<T *,N,P> & list,T * value)3738 static bool AddPendingReadBarrier(Vector<T*, N, P>& list, T* value) {
3739 // Check if value is already present in tail of list.
3740 // TODO: Consider using a hash table here.
3741 const size_t TailWindow = 4;
3742
3743 size_t len = list.length();
3744 for (size_t i = 0; i < std::min(len, TailWindow); i++) {
3745 if (list[len - i - 1] == value) {
3746 return true;
3747 }
3748 }
3749
3750 return list.append(value);
3751 }
3752
getSingletonAndDelayBarrier(const TypeSet * types,size_t i)3753 JSObject* MacroAssembler::getSingletonAndDelayBarrier(const TypeSet* types,
3754 size_t i) {
3755 JSObject* object = types->getSingletonNoBarrier(i);
3756 if (!object) {
3757 return nullptr;
3758 }
3759
3760 if (!AddPendingReadBarrier(pendingObjectReadBarriers_, object)) {
3761 setOOM();
3762 }
3763
3764 return object;
3765 }
3766
getGroupAndDelayBarrier(const TypeSet * types,size_t i)3767 ObjectGroup* MacroAssembler::getGroupAndDelayBarrier(const TypeSet* types,
3768 size_t i) {
3769 ObjectGroup* group = types->getGroupNoBarrier(i);
3770 if (!group) {
3771 return nullptr;
3772 }
3773
3774 if (!AddPendingReadBarrier(pendingObjectGroupReadBarriers_, group)) {
3775 setOOM();
3776 }
3777
3778 return group;
3779 }
3780
// Run all read barriers deferred by getSingletonAndDelayBarrier /
// getGroupAndDelayBarrier.
void MacroAssembler::performPendingReadBarriers() {
  for (JSObject* object : pendingObjectReadBarriers_) {
    JSObject::readBarrier(object);
  }
  for (ObjectGroup* group : pendingObjectGroupReadBarriers_) {
    ObjectGroup::readBarrier(group);
  }
}
3789
3790 // Can't push large frames blindly on windows, so we must touch frame memory
3791 // incrementally, with no more than 4096 - 1 bytes between touches.
3792 //
3793 // This is used across all platforms for simplicity.
// Touch one word per FRAME_TOUCH_INCREMENT bytes over the |numStackValues|*8
// bytes below the stack pointer, so each stack guard page is hit in order
// (see the comment above). |scratch1| and |scratch2| are clobbered;
// |numStackValues| is preserved.
void MacroAssembler::touchFrameValues(Register numStackValues,
                                      Register scratch1, Register scratch2) {
  const size_t FRAME_TOUCH_INCREMENT = 2048;
  static_assert(FRAME_TOUCH_INCREMENT < 4096 - 1,
                "Frame increment is too large");

  // scratch2 = stack pointer - numStackValues * 8: the low end of the frame.
  moveStackPtrTo(scratch2);
  mov(numStackValues, scratch1);
  lshiftPtr(Imm32(3), scratch1);
  subPtr(scratch1, scratch2);
  {
    // Walk scratch1 from just below the stack pointer down to scratch2,
    // storing a zero every FRAME_TOUCH_INCREMENT bytes.
    moveStackPtrTo(scratch1);
    subPtr(Imm32(FRAME_TOUCH_INCREMENT), scratch1);

    Label touchFrameLoop;
    Label touchFrameLoopEnd;
    bind(&touchFrameLoop);
    branchPtr(Assembler::Below, scratch1, scratch2, &touchFrameLoopEnd);
    store32(Imm32(0), Address(scratch1, 0));
    subPtr(Imm32(FRAME_TOUCH_INCREMENT), scratch1);
    jump(&touchFrameLoop);
    bind(&touchFrameLoopEnd);
  }
}
3818
namespace js {
namespace jit {

#ifdef DEBUG
// Debug-only bookkeeping for AutoGenericRegisterScope: while a scope is
// alive (and not released) its register is recorded in the owning
// MacroAssembler's debugTrackedRegisters_ set.
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(
    MacroAssembler& masm, RegisterType reg)
    : RegisterType(reg), masm_(masm), released_(false) {
  masm.debugTrackedRegisters_.add(reg);
}

// Explicit instantiations for the two register kinds used by the JIT.
template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(
    MacroAssembler& masm, Register reg);
template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(
    MacroAssembler& masm, FloatRegister reg);
#endif  // DEBUG

#ifdef DEBUG
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope() {
  // Release the register unless the user already released it explicitly.
  if (!released_) {
    release();
  }
}

template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();

// Stop tracking this scope's register; it may be reacquired later.
template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::release() {
  MOZ_ASSERT(!released_);
  released_ = true;
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.take(reg);
}

template void AutoGenericRegisterScope<Register>::release();
template void AutoGenericRegisterScope<FloatRegister>::release();

// Resume tracking a previously released register.
template <class RegisterType>
void AutoGenericRegisterScope<RegisterType>::reacquire() {
  MOZ_ASSERT(released_);
  released_ = false;
  const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
  masm_.debugTrackedRegisters_.add(reg);
}

template void AutoGenericRegisterScope<Register>::reacquire();
template void AutoGenericRegisterScope<FloatRegister>::reacquire();

#endif  // DEBUG

}  // namespace jit
}  // namespace js
3873