1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/MacroAssembler-inl.h"
8
9 #include "jsfriendapi.h"
10 #include "jsprf.h"
11
12 #include "builtin/TypedObject.h"
13 #include "gc/GCTrace.h"
14 #include "jit/AtomicOp.h"
15 #include "jit/Bailouts.h"
16 #include "jit/BaselineFrame.h"
17 #include "jit/BaselineIC.h"
18 #include "jit/BaselineJIT.h"
19 #include "jit/Lowering.h"
20 #include "jit/MIR.h"
21 #include "js/Conversions.h"
22 #include "vm/TraceLogging.h"
23
24 #include "jsobjinlines.h"
25 #include "jit/shared/Lowering-shared-inl.h"
26 #include "vm/Interpreter-inl.h"
27
28 using namespace js;
29 using namespace js::jit;
30
31 using JS::GenericNaN;
32 using JS::ToInt32;
33
// Guard that the Value in |address| is a member of |types|, branching to
// |miss| on mismatch. Primitive type tags are tested first; objects are then
// either accepted wholesale (AnyObjectType) or narrowed to the specific
// singletons/groups in the set. |scratch| may be clobbered and must be valid
// whenever specific objects need testing. |kind| == TypeTagOnly checks only
// the JSVAL tag for objects (identity verified in DEBUG builds).
template <typename Source> void
MacroAssembler::guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind,
                             Register scratch, Label* miss)
{
    MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
    MOZ_ASSERT(!types->unknown());

    Label matched;
    // Candidate type tests; a branch is only emitted for types actually
    // present in |types|.
    TypeSet::Type tests[8] = {
        TypeSet::Int32Type(),
        TypeSet::UndefinedType(),
        TypeSet::BooleanType(),
        TypeSet::StringType(),
        TypeSet::SymbolType(),
        TypeSet::NullType(),
        TypeSet::MagicArgType(),
        TypeSet::AnyObjectType()
    };

    // The double type also implies Int32.
    // So replace the int32 test with the double one.
    if (types->hasType(TypeSet::DoubleType())) {
        MOZ_ASSERT(types->hasType(TypeSet::Int32Type()));
        tests[0] = TypeSet::DoubleType();
    }

    Register tag = extractTag(address, scratch);

    // Emit all typed tests. Branches are emitted one iteration behind the
    // loop so the final branch can be identified (and inverted) below.
    BranchType lastBranch;
    for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
        if (!types->hasType(tests[i]))
            continue;

        if (lastBranch.isInitialized())
            lastBranch.emit(*this);
        lastBranch = BranchType(Equal, tag, tests[i], &matched);
    }

    // If this is the last check, invert the last branch.
    if (types->hasType(TypeSet::AnyObjectType()) || !types->getObjectCount()) {
        if (!lastBranch.isInitialized()) {
            // Nothing in the set could match at all.
            jump(miss);
            return;
        }

        // Branch to |miss| on mismatch instead of to |matched| on match, so
        // execution falls through to |matched|.
        lastBranch.invertCondition();
        lastBranch.relink(miss);
        lastBranch.emit(*this);

        bind(&matched);
        return;
    }

    if (lastBranch.isInitialized())
        lastBranch.emit(*this);

    // Test specific objects.
    MOZ_ASSERT(scratch != InvalidReg);
    branchTestObject(NotEqual, tag, miss);
    if (kind != BarrierKind::TypeTagOnly) {
        Register obj = extractObject(address, scratch);
        guardObjectType(obj, types, scratch, miss);
    } else {
#ifdef DEBUG
        // Verify (debug-only) that the object really is in the set, or that
        // the set might have become incomplete due to a group change.
        Label fail;
        Register obj = extractObject(address, scratch);
        guardObjectType(obj, types, scratch, &fail);
        jump(&matched);
        bind(&fail);

        // guardObjectType may have clobbered scratch; when obj aliases
        // scratch, re-extract the object before the incompleteness check.
        if (obj == scratch)
            extractObject(address, scratch);
        guardTypeSetMightBeIncomplete(types, obj, scratch, &matched);

        assumeUnreachable("Unexpected object type");
#endif
    }

    bind(&matched);
}
115
// Branch to |label| if the type-set guard for |obj| may have missed for a
// benign reason (see below) rather than a genuine type violation. Used only
// from DEBUG verification paths. |scratch| is clobbered.
template <typename TypeSet>
void
MacroAssembler::guardTypeSetMightBeIncomplete(TypeSet* types, Register obj, Register scratch, Label* label)
{
    // Type set guards might miss when an object's group changes. In this case
    // either its old group's properties will become unknown, or it will change
    // to a native object with an original unboxed group. Jump to label if this
    // might have happened for the input object.

    if (types->unknownObject()) {
        jump(label);
        return;
    }

    // Jump if |obj|'s group addendum marks it as the native version of an
    // unboxed object.
    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
    load32(Address(scratch, ObjectGroup::offsetOfFlags()), scratch);
    and32(Imm32(OBJECT_FLAG_ADDENDUM_MASK), scratch);
    branch32(Assembler::Equal,
             scratch, Imm32(ObjectGroup::addendumOriginalUnboxedGroupValue()), label);

    // Jump if any group in the set has had its properties marked unknown
    // since the set was created. Singleton entries are checked via their
    // object's group.
    for (size_t i = 0; i < types->getObjectCount(); i++) {
        if (JSObject* singleton = types->getSingletonNoBarrier(i)) {
            movePtr(ImmGCPtr(singleton), scratch);
            loadPtr(Address(scratch, JSObject::offsetOfGroup()), scratch);
        } else if (ObjectGroup* group = types->getGroupNoBarrier(i)) {
            movePtr(ImmGCPtr(group), scratch);
        } else {
            continue;
        }
        branchTest32(Assembler::NonZero, Address(scratch, ObjectGroup::offsetOfFlags()),
                     Imm32(OBJECT_FLAG_UNKNOWN_PROPERTIES), label);
    }
}
149
// Guard that the object in |obj| is one of the singletons or groups listed
// in |types|, branching to |miss| otherwise. |scratch| is used to load the
// object's group and may alias |obj| on some platforms.
void
MacroAssembler::guardObjectType(Register obj, const TypeSet* types,
                                Register scratch, Label* miss)
{
    MOZ_ASSERT(!types->unknown());
    MOZ_ASSERT(!types->hasType(TypeSet::AnyObjectType()));
    MOZ_ASSERT_IF(types->getObjectCount() > 0, scratch != InvalidReg);

    // Note: this method elides read barriers on values read from type sets, as
    // this may be called off the main thread during Ion compilation. This is
    // safe to do as the final JitCode object will be allocated during the
    // incremental GC (or the compilation canceled before we start sweeping),
    // see CodeGenerator::link. Other callers should use TypeSet::readBarrier
    // to trigger the barrier on the contents of type sets passed in here.
    Label matched;

    // As in guardTypeSet, branches are emitted one step behind the loops so
    // the final one can be inverted to fall through on a match.
    BranchGCPtr lastBranch;
    MOZ_ASSERT(!lastBranch.isInitialized());
    bool hasObjectGroups = false;
    unsigned count = types->getObjectCount();
    for (unsigned i = 0; i < count; i++) {
        if (!types->getSingletonNoBarrier(i)) {
            hasObjectGroups = hasObjectGroups || types->getGroupNoBarrier(i);
            continue;
        }

        if (lastBranch.isInitialized())
            lastBranch.emit(*this);

        JSObject* object = types->getSingletonNoBarrier(i);
        lastBranch = BranchGCPtr(Equal, obj, ImmGCPtr(object), &matched);
    }

    if (hasObjectGroups) {
        // We are possibly going to overwrite the obj register. So already
        // emit the branch, since branch depends on previous value of obj
        // register and there is definitely a branch following. So no need
        // to invert the condition.
        if (lastBranch.isInitialized())
            lastBranch.emit(*this);
        lastBranch = BranchGCPtr();

        // Note: Some platforms give the same register for obj and scratch.
        // Make sure when writing to scratch, the obj register isn't used anymore!
        loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);

        for (unsigned i = 0; i < count; i++) {
            if (!types->getGroupNoBarrier(i))
                continue;

            if (lastBranch.isInitialized())
                lastBranch.emit(*this);

            ObjectGroup* group = types->getGroupNoBarrier(i);
            lastBranch = BranchGCPtr(Equal, scratch, ImmGCPtr(group), &matched);
        }
    }

    if (!lastBranch.isInitialized()) {
        // The set contains no objects; nothing can match.
        jump(miss);
        return;
    }

    // Invert the final comparison to branch to |miss| on mismatch and fall
    // through to |matched| otherwise.
    lastBranch.invertCondition();
    lastBranch.relink(miss);
    lastBranch.emit(*this);

    bind(&matched);
}
219
// Explicit instantiations for the value-source forms used by the JITs.
template void MacroAssembler::guardTypeSet(const Address& address, const TypeSet* types,
                                           BarrierKind kind, Register scratch, Label* miss);
template void MacroAssembler::guardTypeSet(const ValueOperand& value, const TypeSet* types,
                                           BarrierKind kind, Register scratch, Label* miss);
template void MacroAssembler::guardTypeSet(const TypedOrValueRegister& value, const TypeSet* types,
                                           BarrierKind kind, Register scratch, Label* miss);

template void MacroAssembler::guardTypeSetMightBeIncomplete(const TemporaryTypeSet* types,
                                                            Register obj, Register scratch,
                                                            Label* label);
230
// Shared implementation for storing |value| into a float/SIMD typed-array
// element at |dest|. For the SIMD scalar types, |numElems| selects how many
// lanes are written (partial stores use narrower or unaligned stores).
template<typename S, typename T>
static void
StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, const T& dest,
                       unsigned numElems)
{
    switch (arrayType) {
      case Scalar::Float32:
        masm.storeFloat32(value, dest);
        break;
      case Scalar::Float64:
#ifdef JS_MORE_DETERMINISTIC
        // See the comment in TypedArrayObjectTemplate::doubleToNative.
        masm.canonicalizeDouble(value);
#endif
        masm.storeDouble(value, dest);
        break;
      case Scalar::Float32x4:
        switch (numElems) {
          case 1:
            masm.storeFloat32(value, dest);
            break;
          case 2:
            // Two float32 lanes are stored as one 64-bit chunk.
            masm.storeDouble(value, dest);
            break;
          case 3:
            masm.storeFloat32x3(value, dest);
            break;
          case 4:
            masm.storeUnalignedFloat32x4(value, dest);
            break;
          default: MOZ_CRASH("unexpected number of elements in simd write");
        }
        break;
      case Scalar::Int32x4:
        switch (numElems) {
          case 1:
            masm.storeInt32x1(value, dest);
            break;
          case 2:
            masm.storeInt32x2(value, dest);
            break;
          case 3:
            masm.storeInt32x3(value, dest);
            break;
          case 4:
            masm.storeUnalignedInt32x4(value, dest);
            break;
          default: MOZ_CRASH("unexpected number of elements in simd write");
        }
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}
285
286 void
storeToTypedFloatArray(Scalar::Type arrayType,FloatRegister value,const BaseIndex & dest,unsigned numElems)287 MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
288 const BaseIndex& dest, unsigned numElems)
289 {
290 StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
291 }
292 void
storeToTypedFloatArray(Scalar::Type arrayType,FloatRegister value,const Address & dest,unsigned numElems)293 MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
294 const Address& dest, unsigned numElems)
295 {
296 StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
297 }
298
// Load a typed-array element of |arrayType| from |src| into |dest|.
// |temp| is only used for Uint32 loads into a float register. |fail| is
// taken when a Uint32 value does not fit in a signed int32 and the output
// is a GPR. |canonicalizeDoubles| controls NaN canonicalization of Float64
// loads; |numElems| selects the lane count for SIMD loads.
template<typename T>
void
MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp,
                                   Label* fail, bool canonicalizeDoubles, unsigned numElems)
{
    switch (arrayType) {
      case Scalar::Int8:
        load8SignExtend(src, dest.gpr());
        break;
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
        load8ZeroExtend(src, dest.gpr());
        break;
      case Scalar::Int16:
        load16SignExtend(src, dest.gpr());
        break;
      case Scalar::Uint16:
        load16ZeroExtend(src, dest.gpr());
        break;
      case Scalar::Int32:
        load32(src, dest.gpr());
        break;
      case Scalar::Uint32:
        if (dest.isFloat()) {
            // Any uint32 is exactly representable as a double.
            load32(src, temp);
            convertUInt32ToDouble(temp, dest.fpu());
        } else {
            load32(src, dest.gpr());

            // Bail out if the value doesn't fit into a signed int32 value. This
            // is what allows MLoadUnboxedScalar to have a type() of
            // MIRType_Int32 for UInt32 array loads.
            branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
        }
        break;
      case Scalar::Float32:
        loadFloat32(src, dest.fpu());
        canonicalizeFloat(dest.fpu());
        break;
      case Scalar::Float64:
        loadDouble(src, dest.fpu());
        if (canonicalizeDoubles)
            canonicalizeDouble(dest.fpu());
        break;
      case Scalar::Int32x4:
        switch (numElems) {
          case 1:
            loadInt32x1(src, dest.fpu());
            break;
          case 2:
            loadInt32x2(src, dest.fpu());
            break;
          case 3:
            loadInt32x3(src, dest.fpu());
            break;
          case 4:
            loadUnalignedInt32x4(src, dest.fpu());
            break;
          default: MOZ_CRASH("unexpected number of elements in SIMD load");
        }
        break;
      case Scalar::Float32x4:
        switch (numElems) {
          case 1:
            loadFloat32(src, dest.fpu());
            break;
          case 2:
            // Two float32 lanes are loaded as one 64-bit chunk.
            loadDouble(src, dest.fpu());
            break;
          case 3:
            loadFloat32x3(src, dest.fpu());
            break;
          case 4:
            loadUnalignedFloat32x4(src, dest.fpu());
            break;
          default: MOZ_CRASH("unexpected number of elements in SIMD load");
        }
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}
381
// Explicit instantiations for the address forms used by the JITs.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, AnyRegister dest,
                                                 Register temp, Label* fail, bool canonicalizeDoubles,
                                                 unsigned numElems);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, AnyRegister dest,
                                                 Register temp, Label* fail, bool canonicalizeDoubles,
                                                 unsigned numElems);
388
// Load a typed-array element of |arrayType| from |src| and box it into the
// Value register pair |dest|. For Uint32, |allowDouble| permits boxing
// out-of-int32-range values as doubles; otherwise such values branch to
// |fail|. |temp| is used so |dest| is not clobbered on the failing path.
template<typename T>
void
MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest,
                                   bool allowDouble, Register temp, Label* fail)
{
    switch (arrayType) {
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
        // These all fit in an int32, so the typed load cannot fail.
        loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()), InvalidReg, nullptr);
        tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
        break;
      case Scalar::Uint32:
        // Don't clobber dest when we could fail, instead use temp.
        load32(src, temp);
        if (allowDouble) {
            // If the value fits in an int32, store an int32 type tag.
            // Else, convert the value to double and box it.
            Label done, isDouble;
            branchTest32(Assembler::Signed, temp, temp, &isDouble);
            {
                tagValue(JSVAL_TYPE_INT32, temp, dest);
                jump(&done);
            }
            bind(&isDouble);
            {
                convertUInt32ToDouble(temp, ScratchDoubleReg);
                boxDouble(ScratchDoubleReg, dest);
            }
            bind(&done);
        } else {
            // Bailout if the value does not fit in an int32.
            branchTest32(Assembler::Signed, temp, temp, fail);
            tagValue(JSVAL_TYPE_INT32, temp, dest);
        }
        break;
      case Scalar::Float32:
        loadFromTypedArray(arrayType, src, AnyRegister(ScratchFloat32Reg), dest.scratchReg(),
                           nullptr);
        convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
        boxDouble(ScratchDoubleReg, dest);
        break;
      case Scalar::Float64:
        loadFromTypedArray(arrayType, src, AnyRegister(ScratchDoubleReg), dest.scratchReg(),
                           nullptr);
        boxDouble(ScratchDoubleReg, dest);
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}
443
// Explicit instantiations for the address forms used by the JITs.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, const ValueOperand& dest,
                                                 bool allowDouble, Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, const ValueOperand& dest,
                                                 bool allowDouble, Register temp, Label* fail);
448
// Load the unboxed property of |type| stored at |address| into |output|,
// boxing it when |output| is a Value register. A null object pointer in an
// OBJECT property boxes as NullValue; a typed (non-Value) OBJECT output
// asserts the pointer is non-null.
template <typename T>
void
MacroAssembler::loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output)
{
    switch (type) {
      case JSVAL_TYPE_INT32: {
        // Handle loading an int32 into a double reg.
        if (output.type() == MIRType_Double) {
            convertInt32ToDouble(address, output.typedReg().fpu());
            break;
        }
        // Fallthrough: int32 into a GPR is handled with the cases below.
      }

      case JSVAL_TYPE_BOOLEAN:
      case JSVAL_TYPE_STRING: {
        Register outReg;
        if (output.hasValue()) {
            outReg = output.valueReg().scratchReg();
        } else {
            MOZ_ASSERT(output.type() == MIRTypeFromValueType(type));
            outReg = output.typedReg().gpr();
        }

        // Unboxed storage widths: boolean is 1 byte, int32 is 4 bytes,
        // string is a pointer.
        switch (type) {
          case JSVAL_TYPE_BOOLEAN:
            load8ZeroExtend(address, outReg);
            break;
          case JSVAL_TYPE_INT32:
            load32(address, outReg);
            break;
          case JSVAL_TYPE_STRING:
            loadPtr(address, outReg);
            break;
          default:
            MOZ_CRASH();
        }

        if (output.hasValue())
            tagValue(type, outReg, output.valueReg());
        break;
      }

      case JSVAL_TYPE_OBJECT:
        if (output.hasValue()) {
            Register scratch = output.valueReg().scratchReg();
            loadPtr(address, scratch);

            // A null pointer in unboxed storage represents the JS value
            // null; box it accordingly.
            Label notNull, done;
            branchPtr(Assembler::NotEqual, scratch, ImmWord(0), &notNull);

            moveValue(NullValue(), output.valueReg());
            jump(&done);

            bind(&notNull);
            tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());

            bind(&done);
        } else {
            // Reading null can't be possible here, as otherwise the result
            // would be a value (either because null has been read before or
            // because there is a barrier).
            Register reg = output.typedReg().gpr();
            loadPtr(address, reg);
#ifdef DEBUG
            Label ok;
            branchTestPtr(Assembler::NonZero, reg, reg, &ok);
            assumeUnreachable("Null not possible");
            bind(&ok);
#endif
        }
        break;

      case JSVAL_TYPE_DOUBLE:
        // Note: doubles in unboxed objects are not accessed through other
        // views and do not need canonicalization.
        if (output.hasValue())
            loadValue(address, output.valueReg());
        else
            loadDouble(address, output.typedReg().fpu());
        break;

      default:
        MOZ_CRASH();
    }
}
535
// Explicit instantiations for the address forms used by the JITs.
template void
MacroAssembler::loadUnboxedProperty(Address address, JSValueType type,
                                    TypedOrValueRegister output);

template void
MacroAssembler::loadUnboxedProperty(BaseIndex address, JSValueType type,
                                    TypedOrValueRegister output);
543
544 static void
StoreUnboxedFailure(MacroAssembler & masm,Label * failure)545 StoreUnboxedFailure(MacroAssembler& masm, Label* failure)
546 {
547 // Storing a value to an unboxed property is a fallible operation and
548 // the caller must provide a failure label if a particular unboxed store
549 // might fail. Sometimes, however, a store that cannot succeed (such as
550 // storing a string to an int32 property) will be marked as infallible.
551 // This can only happen if the code involved is unreachable.
552 if (failure)
553 masm.jump(failure);
554 else
555 masm.assumeUnreachable("Incompatible write to unboxed property");
556 }
557
// Store |value| into the unboxed property of |type| at |address|. When the
// value's type cannot match the property's type, the store branches to
// |failure| (or asserts unreachability when |failure| is null — see
// StoreUnboxedFailure). Three value shapes are handled per property type:
// a compile-time constant, a typed register, and a boxed Value register.
template <typename T>
void
MacroAssembler::storeUnboxedProperty(T address, JSValueType type,
                                     ConstantOrRegister value, Label* failure)
{
    switch (type) {
      case JSVAL_TYPE_BOOLEAN:
        if (value.constant()) {
            if (value.value().isBoolean())
                store8(Imm32(value.value().toBoolean()), address);
            else
                StoreUnboxedFailure(*this, failure);
        } else if (value.reg().hasTyped()) {
            if (value.reg().type() == MIRType_Boolean)
                store8(value.reg().typedReg().gpr(), address);
            else
                StoreUnboxedFailure(*this, failure);
        } else {
            if (failure)
                branchTestBoolean(Assembler::NotEqual, value.reg().valueReg(), failure);
            storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ 1);
        }
        break;

      case JSVAL_TYPE_INT32:
        if (value.constant()) {
            if (value.value().isInt32())
                store32(Imm32(value.value().toInt32()), address);
            else
                StoreUnboxedFailure(*this, failure);
        } else if (value.reg().hasTyped()) {
            if (value.reg().type() == MIRType_Int32)
                store32(value.reg().typedReg().gpr(), address);
            else
                StoreUnboxedFailure(*this, failure);
        } else {
            if (failure)
                branchTestInt32(Assembler::NotEqual, value.reg().valueReg(), failure);
            storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ 4);
        }
        break;

      case JSVAL_TYPE_DOUBLE:
        // Double properties accept both int32 and double inputs; int32
        // inputs are converted before storing.
        if (value.constant()) {
            if (value.value().isNumber()) {
                loadConstantDouble(value.value().toNumber(), ScratchDoubleReg);
                storeDouble(ScratchDoubleReg, address);
            } else {
                StoreUnboxedFailure(*this, failure);
            }
        } else if (value.reg().hasTyped()) {
            if (value.reg().type() == MIRType_Int32) {
                convertInt32ToDouble(value.reg().typedReg().gpr(), ScratchDoubleReg);
                storeDouble(ScratchDoubleReg, address);
            } else if (value.reg().type() == MIRType_Double) {
                storeDouble(value.reg().typedReg().fpu(), address);
            } else {
                StoreUnboxedFailure(*this, failure);
            }
        } else {
            ValueOperand reg = value.reg().valueReg();
            Label notInt32, end;
            branchTestInt32(Assembler::NotEqual, reg, &notInt32);
            int32ValueToDouble(reg, ScratchDoubleReg);
            storeDouble(ScratchDoubleReg, address);
            jump(&end);
            bind(&notInt32);
            if (failure)
                branchTestDouble(Assembler::NotEqual, reg, failure);
            storeValue(reg, address);
            bind(&end);
        }
        break;

      case JSVAL_TYPE_OBJECT:
        // Object properties accept objects or null; both are stored as a
        // plain pointer (null as nullptr).
        if (value.constant()) {
            if (value.value().isObjectOrNull())
                storePtr(ImmGCPtr(value.value().toObjectOrNull()), address);
            else
                StoreUnboxedFailure(*this, failure);
        } else if (value.reg().hasTyped()) {
            MOZ_ASSERT(value.reg().type() != MIRType_Null);
            if (value.reg().type() == MIRType_Object)
                storePtr(value.reg().typedReg().gpr(), address);
            else
                StoreUnboxedFailure(*this, failure);
        } else {
            if (failure) {
                Label ok;
                branchTestNull(Assembler::Equal, value.reg().valueReg(), &ok);
                branchTestObject(Assembler::NotEqual, value.reg().valueReg(), failure);
                bind(&ok);
            }
            storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ sizeof(uintptr_t));
        }
        break;

      case JSVAL_TYPE_STRING:
        if (value.constant()) {
            if (value.value().isString())
                storePtr(ImmGCPtr(value.value().toString()), address);
            else
                StoreUnboxedFailure(*this, failure);
        } else if (value.reg().hasTyped()) {
            if (value.reg().type() == MIRType_String)
                storePtr(value.reg().typedReg().gpr(), address);
            else
                StoreUnboxedFailure(*this, failure);
        } else {
            if (failure)
                branchTestString(Assembler::NotEqual, value.reg().valueReg(), failure);
            storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ sizeof(uintptr_t));
        }
        break;

      default:
        MOZ_CRASH();
    }
}
677
// Explicit instantiations for the address forms used by the JITs.
template void
MacroAssembler::storeUnboxedProperty(Address address, JSValueType type,
                                     ConstantOrRegister value, Label* failure);

template void
MacroAssembler::storeUnboxedProperty(BaseIndex address, JSValueType type,
                                     ConstantOrRegister value, Label* failure);
685
// Branch to |failure| unless |index| is below the capacity of the unboxed
// array |obj|. Small capacities equal the array length; larger ones are
// encoded as an index into the static CapacityArray table. |temp| is
// clobbered.
void
MacroAssembler::checkUnboxedArrayCapacity(Register obj, const Int32Key& index, Register temp,
                                          Label* failure)
{
    Address initLengthAddr(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
    Address lengthAddr(obj, UnboxedArrayObject::offsetOfLength());

    // A zero capacity-index field means the capacity equals the length.
    Label capacityIsIndex, done;
    load32(initLengthAddr, temp);
    branchTest32(Assembler::NonZero, temp, Imm32(UnboxedArrayObject::CapacityMask), &capacityIsIndex);
    branchKey(Assembler::BelowOrEqual, lengthAddr, index, failure);
    jump(&done);
    bind(&capacityIsIndex);

    // Do a partial shift so that we can get an absolute offset from the base
    // of CapacityArray to use.
    JS_STATIC_ASSERT(sizeof(UnboxedArrayObject::CapacityArray[0]) == 4);
    rshiftPtr(Imm32(UnboxedArrayObject::CapacityShift - 2), temp);
    and32(Imm32(~0x3), temp);

    // Load the actual capacity from the table and compare against the index.
    addPtr(ImmPtr(&UnboxedArrayObject::CapacityArray), temp);
    branchKey(Assembler::BelowOrEqual, Address(temp, 0), index, failure);
    bind(&done);
}
710
// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
void
MacroAssembler::checkAllocatorState(Label* fail)
{
    // Don't execute the inline path if we are tracing allocations,
    // or when the memory profiler is enabled.
    if (js::gc::TraceEnabled() || MemProfiler::enabled())
        jump(fail);

# ifdef JS_GC_ZEAL
    // Don't execute the inline path if gc zeal or tracing are active.
    branch32(Assembler::NotEqual,
             AbsoluteAddress(GetJitContext()->runtime->addressOfGCZeal()), Imm32(0),
             fail);
# endif

    // Don't execute the inline path if the compartment has an object metadata callback,
    // as the metadata to use for the object may vary between executions of the op.
    if (GetJitContext()->compartment->hasObjectMetadataCallback())
        jump(fail);
}
733
734 // Inline version of ShouldNurseryAllocate.
735 bool
shouldNurseryAllocate(gc::AllocKind allocKind,gc::InitialHeap initialHeap)736 MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind, gc::InitialHeap initialHeap)
737 {
738 // Note that Ion elides barriers on writes to objects known to be in the
739 // nursery, so any allocation that can be made into the nursery must be made
740 // into the nursery, even if the nursery is disabled. At runtime these will
741 // take the out-of-line path, which is required to insert a barrier for the
742 // initializing writes.
743 return IsNurseryAllocable(allocKind) && initialHeap != gc::TenuredHeap;
744 }
745
746 // Inline version of Nursery::allocateObject. If the object has dynamic slots,
747 // this fills in the slots_ pointer.
748 void
nurseryAllocate(Register result,Register temp,gc::AllocKind allocKind,size_t nDynamicSlots,gc::InitialHeap initialHeap,Label * fail)749 MacroAssembler::nurseryAllocate(Register result, Register temp, gc::AllocKind allocKind,
750 size_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail)
751 {
752 MOZ_ASSERT(IsNurseryAllocable(allocKind));
753 MOZ_ASSERT(initialHeap != gc::TenuredHeap);
754
755 // We still need to allocate in the nursery, per the comment in
756 // shouldNurseryAllocate; however, we need to insert into the
757 // mallocedBuffers set, so bail to do the nursery allocation in the
758 // interpreter.
759 if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
760 jump(fail);
761 return;
762 }
763
764 // No explicit check for nursery.isEnabled() is needed, as the comparison
765 // with the nursery's end will always fail in such cases.
766 const Nursery& nursery = GetJitContext()->runtime->gcNursery();
767 int thingSize = int(gc::Arena::thingSize(allocKind));
768 int totalSize = thingSize + nDynamicSlots * sizeof(HeapSlot);
769 loadPtr(AbsoluteAddress(nursery.addressOfPosition()), result);
770 computeEffectiveAddress(Address(result, totalSize), temp);
771 branchPtr(Assembler::Below, AbsoluteAddress(nursery.addressOfCurrentEnd()), temp, fail);
772 storePtr(temp, AbsoluteAddress(nursery.addressOfPosition()));
773
774 if (nDynamicSlots) {
775 computeEffectiveAddress(Address(result, thingSize), temp);
776 storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
777 }
778 }
779
// Inlined version of FreeList::allocate. This does not fill in slots_.
// |result| receives the allocated cell; |temp| is clobbered; branches to
// |fail| when no free spans remain for |allocKind|.
void
MacroAssembler::freeListAllocate(Register result, Register temp, gc::AllocKind allocKind, Label* fail)
{
    CompileZone* zone = GetJitContext()->compartment->zone();
    int thingSize = int(gc::Arena::thingSize(allocKind));

    Label fallback;
    Label success;

    // Load FreeList::head::first of |zone|'s freeLists for |allocKind|. If
    // there is no room remaining in the span, fall back to get the next one.
    loadPtr(AbsoluteAddress(zone->addressOfFreeListFirst(allocKind)), result);
    branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(zone->addressOfFreeListLast(allocKind)), result, &fallback);
    computeEffectiveAddress(Address(result, thingSize), temp);
    storePtr(temp, AbsoluteAddress(zone->addressOfFreeListFirst(allocKind)));
    jump(&success);

    bind(&fallback);
    // If there are no FreeSpans left, we bail to finish the allocation. The
    // interpreter will call |refillFreeLists|, setting up a new FreeList so
    // that we can continue allocating in the jit.
    branchPtr(Assembler::Equal, result, ImmPtr(0), fail);
    // Point the free list head at the subsequent span (which may be empty).
    loadPtr(Address(result, js::gc::FreeSpan::offsetOfFirst()), temp);
    storePtr(temp, AbsoluteAddress(zone->addressOfFreeListFirst(allocKind)));
    loadPtr(Address(result, js::gc::FreeSpan::offsetOfLast()), temp);
    storePtr(temp, AbsoluteAddress(zone->addressOfFreeListLast(allocKind)));

    bind(&success);
}
811
812 void
callMallocStub(size_t nbytes,Register result,Label * fail)813 MacroAssembler::callMallocStub(size_t nbytes, Register result, Label* fail)
814 {
815 // This register must match the one in JitRuntime::generateMallocStub.
816 const Register regNBytes = CallTempReg0;
817
818 MOZ_ASSERT(nbytes > 0);
819 MOZ_ASSERT(nbytes <= INT32_MAX);
820
821 if (regNBytes != result)
822 push(regNBytes);
823 move32(Imm32(nbytes), regNBytes);
824 call(GetJitContext()->runtime->jitRuntime()->mallocStub());
825 if (regNBytes != result) {
826 movePtr(regNBytes, result);
827 pop(regNBytes);
828 }
829 branchTest32(Assembler::Zero, result, result, fail);
830 }
831
832 void
callFreeStub(Register slots)833 MacroAssembler::callFreeStub(Register slots)
834 {
835 // This register must match the one in JitRuntime::generateFreeStub.
836 const Register regSlots = CallTempReg0;
837
838 push(regSlots);
839 movePtr(slots, regSlots);
840 call(GetJitContext()->runtime->jitRuntime()->freeStub());
841 pop(regSlots);
842 }
843
// Inlined equivalent of gc::AllocateObject, without failure case handling.
// Dispatches to nursery or free-list allocation; when dynamic slots are
// needed for a tenured allocation, a malloc'd slots buffer is allocated
// first and freed again if the object allocation fails.
void
MacroAssembler::allocateObject(Register result, Register temp, gc::AllocKind allocKind,
                               uint32_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail)
{
    MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

    checkAllocatorState(fail);

    if (shouldNurseryAllocate(allocKind, initialHeap))
        return nurseryAllocate(result, temp, allocKind, nDynamicSlots, initialHeap, fail);

    if (!nDynamicSlots)
        return freeListAllocate(result, temp, allocKind, fail);

    // Allocate the dynamic slots buffer first; the pointer is kept live in
    // |temp|, spilled to the stack across the object allocation below.
    callMallocStub(nDynamicSlots * sizeof(HeapValue), temp, fail);

    Label failAlloc;
    Label success;

    push(temp);
    freeListAllocate(result, temp, allocKind, &failAlloc);

    pop(temp);
    storePtr(temp, Address(result, NativeObject::offsetOfSlots()));

    jump(&success);

    bind(&failAlloc);
    // Object allocation failed: free the slots buffer so it doesn't leak
    // before taking the caller's failure path.
    pop(temp);
    callFreeStub(temp);
    jump(fail);

    bind(&success);
}
879
// Allocate and initialize a GC object cloned from |templateObj|, leaving it
// in |obj|. |temp| is clobbered; |fail| is taken when inline allocation is
// not possible. |initContents| and |convertDoubleElements| are forwarded to
// initGCThing.
void
MacroAssembler::createGCObject(Register obj, Register temp, JSObject* templateObj,
                               gc::InitialHeap initialHeap, Label* fail, bool initContents,
                               bool convertDoubleElements)
{
    gc::AllocKind allocKind = templateObj->asTenured().getAllocKind();
    MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

    uint32_t nDynamicSlots = 0;
    if (templateObj->isNative()) {
        nDynamicSlots = templateObj->as<NativeObject>().numDynamicSlots();

        // Arrays with copy on write elements do not need fixed space for an
        // elements header. The template object, which owns the original
        // elements, might have another allocation kind.
        if (templateObj->as<NativeObject>().denseElementsAreCopyOnWrite())
            allocKind = gc::AllocKind::OBJECT0_BACKGROUND;
    }

    allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
    initGCThing(obj, temp, templateObj, initContents, convertDoubleElements);
}
902
903
// Inlined equivalent of gc::AllocateNonObject, without failure case handling.
// Non-object allocation does not need to worry about slots, so can take a
// simpler path.
void
MacroAssembler::allocateNonObject(Register result, Register temp, gc::AllocKind allocKind, Label* fail)
{
    // Jump to |fail| if inline allocation cannot currently be used.
    checkAllocatorState(fail);
    // Take a cell of kind |allocKind| off the free list into |result|,
    // jumping to |fail| when the list is empty. |temp| is used as scratch.
    freeListAllocate(result, temp, allocKind, fail);
}
913
// Inline-allocate a JSString cell into |result|; jumps to |fail| when inline
// allocation is not possible.
void
MacroAssembler::newGCString(Register result, Register temp, Label* fail)
{
    allocateNonObject(result, temp, js::gc::AllocKind::STRING, fail);
}
919
// Inline-allocate a fat inline string cell into |result|; jumps to |fail|
// when inline allocation is not possible.
void
MacroAssembler::newGCFatInlineString(Register result, Register temp, Label* fail)
{
    allocateNonObject(result, temp, js::gc::AllocKind::FAT_INLINE_STRING, fail);
}
925
926 void
copySlotsFromTemplate(Register obj,const NativeObject * templateObj,uint32_t start,uint32_t end)927 MacroAssembler::copySlotsFromTemplate(Register obj, const NativeObject* templateObj,
928 uint32_t start, uint32_t end)
929 {
930 uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
931 for (unsigned i = start; i < nfixed; i++)
932 storeValue(templateObj->getFixedSlot(i), Address(obj, NativeObject::getFixedSlotOffset(i)));
933 }
934
// Emit code storing |v| into |end - start| consecutive Value-sized slots,
// the first of which lives at |base|. Note that |start|/|end| determine only
// the number of stores: |base| itself must already point at the first slot
// to fill. |v| must be undefined or the uninitialized-lexical magic value.
// |temp| is clobbered.
void
MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
                                           uint32_t start, uint32_t end, const Value& v)
{
    MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));

    // Nothing to emit for an empty range.
    if (start >= end)
        return;

#ifdef JS_NUNBOX32
    // We only have a single spare register, so do the initialization as two
    // strided writes of the tag and body.
    jsval_layout jv = JSVAL_TO_IMPL(v);

    Address addr = base;
    move32(Imm32(jv.s.payload.i32), temp);
    for (unsigned i = start; i < end; ++i, addr.offset += sizeof(HeapValue))
        store32(temp, ToPayload(addr));

    addr = base;
    move32(Imm32(jv.s.tag), temp);
    for (unsigned i = start; i < end; ++i, addr.offset += sizeof(HeapValue))
        store32(temp, ToType(addr));
#else
    // A boxed Value fits in one register here: load it once and store it to
    // each slot in turn.
    moveValue(v, temp);
    for (uint32_t i = start; i < end; ++i, base.offset += sizeof(HeapValue))
        storePtr(temp, base);
#endif
}
964
// Fill |end - start| slots starting at |base| with UndefinedValue.
void
MacroAssembler::fillSlotsWithUndefined(Address base, Register temp, uint32_t start, uint32_t end)
{
    fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}
970
// Fill |end - start| slots starting at |base| with the magic value marking
// an uninitialized lexical binding.
void
MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp, uint32_t start, uint32_t end)
{
    fillSlotsWithConstantValue(base, temp, start, end, MagicValue(JS_UNINITIALIZED_LEXICAL));
}
976
977 static void
FindStartOfUndefinedAndUninitializedSlots(NativeObject * templateObj,uint32_t nslots,uint32_t * startOfUndefined,uint32_t * startOfUninitialized)978 FindStartOfUndefinedAndUninitializedSlots(NativeObject* templateObj, uint32_t nslots,
979 uint32_t* startOfUndefined, uint32_t* startOfUninitialized)
980 {
981 MOZ_ASSERT(nslots == templateObj->lastProperty()->slotSpan(templateObj->getClass()));
982 MOZ_ASSERT(nslots > 0);
983 uint32_t first = nslots;
984 for (; first != 0; --first) {
985 if (!IsUninitializedLexical(templateObj->getSlot(first - 1)))
986 break;
987 }
988 *startOfUninitialized = first;
989 for (; first != 0; --first) {
990 if (templateObj->getSlot(first - 1) != UndefinedValue()) {
991 *startOfUndefined = first;
992 return;
993 }
994 }
995 *startOfUndefined = 0;
996 }
997
998 void
initGCSlots(Register obj,Register temp,NativeObject * templateObj,bool initContents)999 MacroAssembler::initGCSlots(Register obj, Register temp, NativeObject* templateObj,
1000 bool initContents)
1001 {
1002 // Slots of non-array objects are required to be initialized.
1003 // Use the values currently in the template object.
1004 uint32_t nslots = templateObj->lastProperty()->slotSpan(templateObj->getClass());
1005 if (nslots == 0)
1006 return;
1007
1008 uint32_t nfixed = templateObj->numUsedFixedSlots();
1009 uint32_t ndynamic = templateObj->numDynamicSlots();
1010
1011 // Attempt to group slot writes such that we minimize the amount of
1012 // duplicated data we need to embed in code and load into registers. In
1013 // general, most template object slots will be undefined except for any
1014 // reserved slots. Since reserved slots come first, we split the object
1015 // logically into independent non-UndefinedValue writes to the head and
1016 // duplicated writes of UndefinedValue to the tail. For the majority of
1017 // objects, the "tail" will be the entire slot range.
1018 //
1019 // The template object may be a CallObject, in which case we need to
1020 // account for uninitialized lexical slots as well as undefined
1021 // slots. Unitialized lexical slots always appear at the very end of
1022 // slots, after undefined.
1023 uint32_t startOfUndefined = nslots;
1024 uint32_t startOfUninitialized = nslots;
1025 FindStartOfUndefinedAndUninitializedSlots(templateObj, nslots,
1026 &startOfUndefined, &startOfUninitialized);
1027 MOZ_ASSERT(startOfUndefined <= nfixed); // Reserved slots must be fixed.
1028 MOZ_ASSERT_IF(startOfUndefined != nfixed, startOfUndefined <= startOfUninitialized);
1029 MOZ_ASSERT_IF(!templateObj->is<CallObject>(), startOfUninitialized == nslots);
1030
1031 // Copy over any preserved reserved slots.
1032 copySlotsFromTemplate(obj, templateObj, 0, startOfUndefined);
1033
1034 // Fill the rest of the fixed slots with undefined and uninitialized.
1035 if (initContents) {
1036 fillSlotsWithUndefined(Address(obj, NativeObject::getFixedSlotOffset(startOfUndefined)), temp,
1037 startOfUndefined, Min(startOfUninitialized, nfixed));
1038 size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
1039 fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized, nfixed);
1040 }
1041
1042 if (ndynamic) {
1043 // We are short one register to do this elegantly. Borrow the obj
1044 // register briefly for our slots base address.
1045 push(obj);
1046 loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
1047
1048 // Initially fill all dynamic slots with undefined.
1049 fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
1050
1051 // Fill uninitialized slots if necessary.
1052 fillSlotsWithUninitialized(Address(obj, 0), temp, startOfUninitialized - nfixed,
1053 nslots - startOfUninitialized);
1054
1055 pop(obj);
1056 }
1057 }
1058
// Fast initialization of an empty object returned by allocateObject():
// store the group and shape, then set up slots, elements, and typed/unboxed
// contents to match |templateObj|. |temp| is clobbered.
void
MacroAssembler::initGCThing(Register obj, Register temp, JSObject* templateObj,
                            bool initContents, bool convertDoubleElements)
{
    // Fast initialization of an empty object returned by allocateObject().

    storePtr(ImmGCPtr(templateObj->group()), Address(obj, JSObject::offsetOfGroup()));

    if (Shape* shape = templateObj->maybeShape())
        storePtr(ImmGCPtr(shape), Address(obj, JSObject::offsetOfShape()));

    MOZ_ASSERT_IF(convertDoubleElements, templateObj->is<ArrayObject>());

    if (templateObj->isNative()) {
        NativeObject* ntemplate = &templateObj->as<NativeObject>();
        MOZ_ASSERT_IF(!ntemplate->denseElementsAreCopyOnWrite(), !ntemplate->hasDynamicElements());

        // If the object has dynamic slots, the slots member has already been
        // filled in.
        if (!ntemplate->hasDynamicSlots())
            storePtr(ImmPtr(nullptr), Address(obj, NativeObject::offsetOfSlots()));

        if (ntemplate->denseElementsAreCopyOnWrite()) {
            // Point directly at the template's copy-on-write elements.
            storePtr(ImmPtr((const Value*) ntemplate->getDenseElements()),
                     Address(obj, NativeObject::offsetOfElements()));
        } else if (ntemplate->is<ArrayObject>()) {
            int elementsOffset = NativeObject::offsetOfFixedElements();

            // Point the elements pointer at the object's inline elements.
            computeEffectiveAddress(Address(obj, elementsOffset), temp);
            storePtr(temp, Address(obj, NativeObject::offsetOfElements()));

            // Fill in the elements header.
            store32(Imm32(ntemplate->getDenseCapacity()),
                    Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
            store32(Imm32(ntemplate->getDenseInitializedLength()),
                    Address(obj, elementsOffset + ObjectElements::offsetOfInitializedLength()));
            store32(Imm32(ntemplate->as<ArrayObject>().length()),
                    Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
            store32(Imm32(convertDoubleElements
                          ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
                          : 0),
                    Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
            MOZ_ASSERT(!ntemplate->hasPrivate());
        } else {
            // If the target type could be a TypedArray that maps shared memory
            // then this would need to store emptyObjectElementsShared in that case.
            // That cannot happen at present; TypedArray allocation is always
            // a VM call.
            storePtr(ImmPtr(emptyObjectElements), Address(obj, NativeObject::offsetOfElements()));

            initGCSlots(obj, temp, ntemplate, initContents);

            if (ntemplate->hasPrivate()) {
                uint32_t nfixed = ntemplate->numFixedSlots();
                storePtr(ImmPtr(ntemplate->getPrivate()),
                         Address(obj, NativeObject::getPrivateDataOffset(nfixed)));
            }
        }
    } else if (templateObj->is<InlineTypedObject>()) {
        size_t nbytes = templateObj->as<InlineTypedObject>().size();
        const uint8_t* memory = templateObj->as<InlineTypedObject>().inlineTypedMem();

        // Memcpy the contents of the template object to the new object,
        // one word-sized immediate store at a time.
        size_t offset = 0;
        while (nbytes) {
            uintptr_t value = *(uintptr_t*)(memory + offset);
            storePtr(ImmWord(value),
                     Address(obj, InlineTypedObject::offsetOfDataStart() + offset));
            nbytes = (nbytes < sizeof(uintptr_t)) ? 0 : nbytes - sizeof(uintptr_t);
            offset += sizeof(uintptr_t);
        }
    } else if (templateObj->is<UnboxedPlainObject>()) {
        // Clear the expando pointer, then initialize the unboxed payload.
        storePtr(ImmWord(0), Address(obj, UnboxedPlainObject::offsetOfExpando()));
        if (initContents)
            initUnboxedObjectContents(obj, &templateObj->as<UnboxedPlainObject>());
    } else if (templateObj->is<UnboxedArrayObject>()) {
        MOZ_ASSERT(templateObj->as<UnboxedArrayObject>().hasInlineElements());
        int elementsOffset = UnboxedArrayObject::offsetOfInlineElements();
        computeEffectiveAddress(Address(obj, elementsOffset), temp);
        storePtr(temp, Address(obj, UnboxedArrayObject::offsetOfElements()));
        store32(Imm32(templateObj->as<UnboxedArrayObject>().length()),
                Address(obj, UnboxedArrayObject::offsetOfLength()));
        uint32_t capacityIndex = templateObj->as<UnboxedArrayObject>().capacityIndex();
        store32(Imm32(capacityIndex << UnboxedArrayObject::CapacityShift),
                Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()));
    } else {
        MOZ_CRASH("Unknown object");
    }

#ifdef JS_GC_TRACE
    // Notify the GC tracer of the new object. NOTE(review): this |temp|
    // shadows the parameter; only compiled when JS_GC_TRACE is defined.
    RegisterSet regs = RegisterSet::Volatile();
    PushRegsInMask(regs);
    regs.takeUnchecked(obj);
    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(obj);
    movePtr(ImmGCPtr(templateObj->type()), temp);
    passABIArg(temp);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::gc::TraceCreateObject));

    PopRegsInMask(RegisterSet::Volatile());
#endif
}
1163
// Emit code initializing the data payload of a freshly allocated unboxed
// plain object, per UnboxedPlainObject::create, using the layout's
// (-1)-terminated trace list of field offsets.
void
MacroAssembler::initUnboxedObjectContents(Register object, UnboxedPlainObject* templateObject)
{
    const UnboxedLayout& layout = templateObject->layout();

    // Initialize reference fields of the object, per UnboxedPlainObject::create.
    if (const int32_t* list = layout.traceList()) {
        // First run: fields initialized with the empty atom.
        while (*list != -1) {
            storePtr(ImmGCPtr(GetJitContext()->runtime->names().empty),
                     Address(object, UnboxedPlainObject::offsetOfData() + *list));
            list++;
        }
        list++;
        // Second run: fields cleared to null.
        while (*list != -1) {
            storePtr(ImmWord(0),
                     Address(object, UnboxedPlainObject::offsetOfData() + *list));
            list++;
        }
        // Unboxed objects don't have Values to initialize: the third run of
        // the trace list must be empty.
        MOZ_ASSERT(*(list + 1) == -1);
    }
}
1186
// Emit an inline string equality comparison (op must be an equality op),
// leaving 0/1 in |result|. Jumps to |fail| when the answer cannot be
// decided inline: two distinct non-atom strings of equal length.
void
MacroAssembler::compareStrings(JSOp op, Register left, Register right, Register result,
                               Label* fail)
{
    MOZ_ASSERT(IsEqualityOp(op));

    Label done;
    Label notPointerEqual;
    // Fast path for identical strings.
    branchPtr(Assembler::NotEqual, left, right, &notPointerEqual);
    move32(Imm32(op == JSOP_EQ || op == JSOP_STRICTEQ), result);
    jump(&done);

    bind(&notPointerEqual);

    Label notAtom;
    // Optimize the equality operation to a pointer compare for two atoms.
    Imm32 atomBit(JSString::ATOM_BIT);
    branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()), atomBit, &notAtom);
    branchTest32(Assembler::Zero, Address(right, JSString::offsetOfFlags()), atomBit, &notAtom);

    // Both are atoms, so distinct pointers decide the comparison.
    cmpPtrSet(JSOpToCondition(MCompare::Compare_String, op), left, right, result);
    jump(&done);

    bind(&notAtom);
    // Strings of different length can never be equal. Equal lengths require
    // a character-by-character comparison, so bail to |fail|.
    loadStringLength(left, result);
    branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()), result, fail);
    move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), result);

    bind(&done);
}
1219
// Load the address of |str|'s character storage into |dest|, handling both
// inline and non-inline string representations.
void
MacroAssembler::loadStringChars(Register str, Register dest)
{
    Label isInline, done;
    branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                 Imm32(JSString::INLINE_CHARS_BIT), &isInline);

    // Non-inline chars: load the stored chars pointer.
    loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
    jump(&done);

    // Inline chars: the storage lives inside the string cell itself.
    bind(&isInline);
    computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()), dest);

    bind(&done);
}
1235
// Load the character at position |index| of |str| into |output|,
// zero-extended. Handles both latin1 (one byte) and two-byte strings.
void
MacroAssembler::loadStringChar(Register str, Register index, Register output)
{
    // |output| temporarily holds the chars pointer, so it must be distinct
    // from both inputs.
    MOZ_ASSERT(str != output);
    MOZ_ASSERT(index != output);

    loadStringChars(str, output);

    Label isLatin1, done;
    branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                 Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
    // Two-byte string: scale the index by 2.
    load16ZeroExtend(BaseIndex(output, index, TimesTwo), output);
    jump(&done);

    bind(&isLatin1);
    load8ZeroExtend(BaseIndex(output, index, TimesOne), output);

    bind(&done);
}
1255
// ABI-callable wrapper used by generateBailoutTail below: report an
// over-recursion error on |cx|.
static void
BailoutReportOverRecursed(JSContext* cx)
{
    ReportOverRecursed(cx);
}
1261
// Emit the common tail executed after a bailout: dispatch on Bailout's
// return code, reporting errors, or rebuilding the baseline frame on the
// stack and resuming execution in baseline code. |bailoutInfo| holds a
// BaselineBailoutInfo*; |scratch| is clobbered.
void
MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
{
    enterExitFrame();

    Label baseline;

    // The return value from Bailout is tagged as:
    // - 0x0: done (enter baseline)
    // - 0x1: error (handle exception)
    // - 0x2: overrecursed
    JS_STATIC_ASSERT(BAILOUT_RETURN_OK == 0);
    JS_STATIC_ASSERT(BAILOUT_RETURN_FATAL_ERROR == 1);
    JS_STATIC_ASSERT(BAILOUT_RETURN_OVERRECURSED == 2);

    branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_OK), &baseline);
    branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_FATAL_ERROR), exceptionLabel());

    // Fall-through: overrecursed.
    {
        // Report the error on the context, then take the exception path.
        loadJSContext(ReturnReg);
        setupUnalignedABICall(scratch);
        passABIArg(ReturnReg);
        callWithABI(JS_FUNC_TO_DATA_PTR(void*, BailoutReportOverRecursed));
        jump(exceptionLabel());
    }

    bind(&baseline);
    {
        // Prepare a register set for use in this case.
        AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
        MOZ_ASSERT(!regs.has(getStackPointer()));
        regs.take(bailoutInfo);

        // Reset SP to the point where clobbering starts.
        loadStackPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)));

        Register copyCur = regs.takeAny();
        Register copyEnd = regs.takeAny();
        Register temp = regs.takeAny();

        // Copy data onto stack.
        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)), copyCur);
        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)), copyEnd);
        {
            // 32-bit-word copy loop, walking copyCur down from copyStackTop
            // to copyStackBottom while growing the machine stack in step.
            Label copyLoop;
            Label endOfCopy;
            bind(&copyLoop);
            branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
            subPtr(Imm32(4), copyCur);
            subFromStackPtr(Imm32(4));
            load32(Address(copyCur, 0), temp);
            store32(temp, Address(getStackPointer(), 0));
            jump(&copyLoop);
            bind(&endOfCopy);
        }

        // Enter exit frame for the FinishBailoutToBaseline call.
        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)), temp);
        load32(Address(temp, BaselineFrame::reverseOffsetOfFrameSize()), temp);
        makeFrameDescriptor(temp, JitFrame_BaselineJS);
        push(temp);
        push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
        // No GC things to mark on the stack, push a bare token.
        enterFakeExitFrame(ExitFrameLayoutBareToken);

        // If monitorStub is non-null, handle resumeAddr appropriately.
        Label noMonitor;
        // NOTE(review): |done| appears to be unused in this function.
        Label done;
        branchPtr(Assembler::Equal,
                  Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)),
                  ImmPtr(nullptr),
                  &noMonitor);

        //
        // Resuming into a monitoring stub chain.
        //
        {
            // Save needed values onto stack temporarily.
            pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)));

            // Call a stub to free allocated memory and create arguments objects.
            setupUnalignedABICall(temp);
            passABIArg(bailoutInfo);
            callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBailoutToBaseline));
            branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());

            // Restore values where they need to be and resume execution.
            AllocatableGeneralRegisterSet enterMonRegs(GeneralRegisterSet::All());
            enterMonRegs.take(R0);
            enterMonRegs.take(ICStubReg);
            enterMonRegs.take(BaselineFrameReg);
            enterMonRegs.takeUnchecked(ICTailCallReg);

            pop(ICStubReg);
            pop(ICTailCallReg);
            pop(BaselineFrameReg);
            popValue(R0);

            // Discard exit frame.
            addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));

#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
            push(ICTailCallReg);
#endif
            jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
        }

        //
        // Resuming into main jitcode.
        //
        bind(&noMonitor);
        {
            // Save needed values onto stack temporarily.
            pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
            pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR1)));
            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));

            // Call a stub to free allocated memory and create arguments objects.
            setupUnalignedABICall(temp);
            passABIArg(bailoutInfo);
            callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBailoutToBaseline));
            branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());

            // Restore values where they need to be and resume execution.
            AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
            enterRegs.take(R0);
            enterRegs.take(R1);
            enterRegs.take(BaselineFrameReg);
            Register jitcodeReg = enterRegs.takeAny();

            pop(jitcodeReg);
            pop(BaselineFrameReg);
            popValue(R1);
            popValue(R0);

            // Discard exit frame.
            addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));

            jump(jitcodeReg);
        }
    }
}
1409
// Load the script's baselineOrIonRaw entry point into |dest|. If |failure|
// is non-null, branch there when the stored pointer is null (no jitcode).
void
MacroAssembler::loadBaselineOrIonRaw(Register script, Register dest, Label* failure)
{
    loadPtr(Address(script, JSScript::offsetOfBaselineOrIonRaw()), dest);
    if (failure)
        branchTestPtr(Assembler::Zero, dest, dest, failure);
}
1417
// Load the script's baselineOrIonSkipArgCheck entry point into |dest|. If
// |failure| is non-null, branch there when the stored pointer is null.
void
MacroAssembler::loadBaselineOrIonNoArgCheck(Register script, Register dest, Label* failure)
{
    loadPtr(Address(script, JSScript::offsetOfBaselineOrIonSkipArgCheck()), dest);
    if (failure)
        branchTestPtr(Assembler::Zero, dest, dest, failure);
}
1425
// Compute the BaselineFrame pointer into |dest|: the frame structure lives
// BaselineFrame::Size() bytes below |framePtr|.
void
MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest)
{
    if (framePtr != dest)
        movePtr(framePtr, dest);
    subPtr(Imm32(BaselineFrame::Size()), dest);
}
1433
// Jump to the runtime's shared exception-tail code.
void
MacroAssembler::handleFailure()
{
    // Re-entry code is irrelevant because the exception will leave the
    // running function and never come back
    JitCode* excTail = GetJitContext()->runtime->jitRuntime()->getExceptionTail();
    jump(excTail);
}
1442
#ifdef DEBUG
// ABI target for MacroAssembler::assumeUnreachable: report |output| as an
// assertion failure. (__FILE__/__LINE__ refer to this wrapper, not to the
// jitcode call site.)
static void
AssumeUnreachable_(const char* output) {
    MOZ_ReportAssertionFailure(output, __FILE__, __LINE__);
}
#endif
1449
// Emit debug-build code reporting |output| as an assertion failure, then a
// breakpoint. In release builds only the breakpoint is emitted; the
// reporting call is also skipped when compiling asm.js.
void
MacroAssembler::assumeUnreachable(const char* output)
{
#ifdef DEBUG
    if (!IsCompilingAsmJS()) {
        // Preserve all volatile registers around the ABI call.
        AllocatableRegisterSet regs(RegisterSet::Volatile());
        LiveRegisterSet save(regs.asLiveSet());
        PushRegsInMask(save);
        Register temp = regs.takeAnyGeneral();

        setupUnalignedABICall(temp);
        movePtr(ImmPtr(output), temp);
        passABIArg(temp);
        callWithABI(JS_FUNC_TO_DATA_PTR(void*, AssumeUnreachable_));

        PopRegsInMask(save);
    }
#endif

    breakpoint();
}
1471
// Debug-only check that the int32 type test |cond| holds for |value|;
// otherwise report |output| as unreachable. Emits nothing in release builds.
template<typename T>
void
MacroAssembler::assertTestInt32(Condition cond, const T& value, const char* output)
{
#ifdef DEBUG
    Label ok;
    branchTestInt32(cond, value, &ok);
    assumeUnreachable(output);
    bind(&ok);
#endif
}

// Explicit instantiation for Address operands.
template void MacroAssembler::assertTestInt32(Condition, const Address&, const char*);
1485
// ABI target for MacroAssembler::printf(const char*): write the text as-is.
static void
Printf0_(const char* output) {
    // Use stderr instead of stdout because this is only used for debug
    // output. stderr is less likely to interfere with the program's normal
    // output, and it's always unbuffered.
    fputs(output, stderr);
}
1493
// Emit code that prints the fixed string |output| to stderr at runtime.
// All volatile registers are saved and restored around the ABI call.
void
MacroAssembler::printf(const char* output)
{
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, Printf0_));

    PopRegsInMask(save);
}
1510
1511 static void
Printf1_(const char * output,uintptr_t value)1512 Printf1_(const char* output, uintptr_t value) {
1513 char* line = JS_sprintf_append(nullptr, output, value);
1514 fprintf(stderr, "%s", line);
1515 js_free(line);
1516 }
1517
// Emit code that prints |output| formatted with the runtime value of
// register |value| to stderr. Volatile registers are saved and restored
// around the ABI call.
void
MacroAssembler::printf(const char* output, Register value)
{
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);

    // Keep |value| out of the scratch pool so it survives until the call.
    regs.takeUnchecked(value);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    passABIArg(value);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, Printf1_));

    PopRegsInMask(save);
}
1537
#ifdef JS_TRACE_LOGGING
// Emit a TraceLogger start-event call for the compile-time-known |textId|.
// Emits nothing when that id is disabled, unless |force| is set. Volatile
// registers are preserved around the ABI call.
void
MacroAssembler::tracelogStartId(Register logger, uint32_t textId, bool force)
{
    if (!force && !TraceLogTextIdEnabled(textId))
        return;

    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    regs.takeUnchecked(logger);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(logger);
    move32(Imm32(textId), temp);
    passABIArg(temp);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStartEventPrivate));

    PopRegsInMask(save);
}
1560
// Emit a TraceLogger start-event call for a text id held in a register.
// Volatile registers are preserved around the ABI call.
void
MacroAssembler::tracelogStartId(Register logger, Register textId)
{
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    regs.takeUnchecked(logger);
    regs.takeUnchecked(textId);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(logger);
    passABIArg(textId);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStartEventPrivate));

    PopRegsInMask(save);
}
1579
// Emit a TraceLogger start call for a runtime TraceLoggerEvent reference.
// Volatile registers are preserved around the ABI call.
void
MacroAssembler::tracelogStartEvent(Register logger, Register event)
{
    // Select the (TraceLoggerThread*, const TraceLoggerEvent&) overload of
    // TraceLogStartEvent for the ABI call.
    void (&TraceLogFunc)(TraceLoggerThread*, const TraceLoggerEvent&) = TraceLogStartEvent;

    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    regs.takeUnchecked(logger);
    regs.takeUnchecked(event);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(logger);
    passABIArg(event);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogFunc));

    PopRegsInMask(save);
}
1600
// Emit a TraceLogger stop-event call for the compile-time-known |textId|.
// Emits nothing when that id is disabled, unless |force| is set. Volatile
// registers are preserved around the ABI call.
void
MacroAssembler::tracelogStopId(Register logger, uint32_t textId, bool force)
{
    if (!force && !TraceLogTextIdEnabled(textId))
        return;

    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    regs.takeUnchecked(logger);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(logger);
    move32(Imm32(textId), temp);
    passABIArg(temp);

    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStopEventPrivate));

    PopRegsInMask(save);
}
1623
// Emit a TraceLogger stop-event call for a text id held in a register.
// Volatile registers are preserved around the ABI call.
void
MacroAssembler::tracelogStopId(Register logger, Register textId)
{
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    regs.takeUnchecked(logger);
    regs.takeUnchecked(textId);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(logger);
    passABIArg(textId);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStopEventPrivate));

    PopRegsInMask(save);
}
#endif
1643
// If the Value stored at |address| holds an int32, convert it in place to
// the equivalent boxed double; otherwise jump to |done|. |scratch| and the
// scratch double register are clobbered.
void
MacroAssembler::convertInt32ValueToDouble(const Address& address, Register scratch, Label* done)
{
    branchTestInt32(Assembler::NotEqual, address, done);
    unboxInt32(address, scratch);
    convertInt32ToDouble(scratch, ScratchDoubleReg);
    storeDouble(ScratchDoubleReg, address);
}
1652
// Convert the boxed value in |value| to a double or float32 in |output|,
// jumping to |fail| for any tag not handled below (e.g. objects, symbols).
// Undefined converts to NaN, null to +0, booleans to 0/1.
void
MacroAssembler::convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
                                            Label* fail, MIRType outputType)
{
    Register tag = splitTagForTest(value);

    Label isDouble, isInt32, isBool, isNull, done;

    branchTestDouble(Assembler::Equal, tag, &isDouble);
    branchTestInt32(Assembler::Equal, tag, &isInt32);
    branchTestBoolean(Assembler::Equal, tag, &isBool);
    branchTestNull(Assembler::Equal, tag, &isNull);
    branchTestUndefined(Assembler::NotEqual, tag, fail);

    // fall-through: undefined
    loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
    jump(&done);

    bind(&isNull);
    loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
    jump(&done);

    bind(&isBool);
    boolValueToFloatingPoint(value, output, outputType);
    jump(&done);

    bind(&isInt32);
    int32ValueToFloatingPoint(value, output, outputType);
    jump(&done);

    bind(&isDouble);
    // For a float32 output on targets with aliased float registers
    // (hasMultiAlias), unbox via the scratch double register first.
    FloatRegister tmp = output;
    if (outputType == MIRType_Float32 && hasMultiAlias())
        tmp = ScratchDoubleReg;

    unboxDouble(value, tmp);
    if (outputType == MIRType_Float32)
        convertDoubleToFloat32(tmp, output);

    bind(&done);
}
1694
1695 bool
convertValueToFloatingPoint(JSContext * cx,const Value & v,FloatRegister output,Label * fail,MIRType outputType)1696 MacroAssembler::convertValueToFloatingPoint(JSContext* cx, const Value& v, FloatRegister output,
1697 Label* fail, MIRType outputType)
1698 {
1699 if (v.isNumber() || v.isString()) {
1700 double d;
1701 if (v.isNumber())
1702 d = v.toNumber();
1703 else if (!StringToNumber(cx, v.toString(), &d))
1704 return false;
1705
1706 loadConstantFloatingPoint(d, (float)d, output, outputType);
1707 return true;
1708 }
1709
1710 if (v.isBoolean()) {
1711 if (v.toBoolean())
1712 loadConstantFloatingPoint(1.0, 1.0f, output, outputType);
1713 else
1714 loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
1715 return true;
1716 }
1717
1718 if (v.isNull()) {
1719 loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
1720 return true;
1721 }
1722
1723 if (v.isUndefined()) {
1724 loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
1725 return true;
1726 }
1727
1728 MOZ_ASSERT(v.isObject() || v.isSymbol());
1729 jump(fail);
1730 return true;
1731 }
1732
// Dispatch a floating-point conversion on whether |src| is a known constant
// or lives in registers. Returns false only when converting a constant
// string fails (propagated from convertValueToFloatingPoint).
bool
MacroAssembler::convertConstantOrRegisterToFloatingPoint(JSContext* cx, ConstantOrRegister src,
                                                         FloatRegister output, Label* fail,
                                                         MIRType outputType)
{
    if (src.constant())
        return convertValueToFloatingPoint(cx, src.value(), output, fail, outputType);

    convertTypedOrValueToFloatingPoint(src.reg(), output, fail, outputType);
    return true;
}
1744
// Convert |src| — either a boxed Value or a typed register — to a double or
// float32 in |output|. Objects, strings and symbols jump to |fail|.
void
MacroAssembler::convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
                                                   Label* fail, MIRType outputType)
{
    MOZ_ASSERT(IsFloatingPointType(outputType));

    // Boxed values take the generic tag-dispatch path.
    if (src.hasValue()) {
        convertValueToFloatingPoint(src.valueReg(), output, fail, outputType);
        return;
    }

    bool outputIsDouble = outputType == MIRType_Double;
    switch (src.type()) {
      case MIRType_Null:
        loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
        break;
      case MIRType_Boolean:
      case MIRType_Int32:
        convertInt32ToFloatingPoint(src.typedReg().gpr(), output, outputType);
        break;
      case MIRType_Float32:
        if (outputIsDouble) {
            convertFloat32ToDouble(src.typedReg().fpu(), output);
        } else {
            // Same width: only move if source and destination differ.
            if (src.typedReg().fpu() != output)
                moveFloat32(src.typedReg().fpu(), output);
        }
        break;
      case MIRType_Double:
        if (outputIsDouble) {
            if (src.typedReg().fpu() != output)
                moveDouble(src.typedReg().fpu(), output);
        } else {
            convertDoubleToFloat32(src.typedReg().fpu(), output);
        }
        break;
      case MIRType_Object:
      case MIRType_String:
      case MIRType_Symbol:
        // No inline conversion exists for these types.
        jump(fail);
        break;
      case MIRType_Undefined:
        loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
        break;
      default:
        MOZ_CRASH("Bad MIRType");
    }
}
1793
1794 void
convertDoubleToInt(FloatRegister src,Register output,FloatRegister temp,Label * truncateFail,Label * fail,IntConversionBehavior behavior)1795 MacroAssembler::convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
1796 Label* truncateFail, Label* fail,
1797 IntConversionBehavior behavior)
1798 {
1799 switch (behavior) {
1800 case IntConversion_Normal:
1801 case IntConversion_NegativeZeroCheck:
1802 convertDoubleToInt32(src, output, fail, behavior == IntConversion_NegativeZeroCheck);
1803 break;
1804 case IntConversion_Truncate:
1805 branchTruncateDouble(src, output, truncateFail ? truncateFail : fail);
1806 break;
1807 case IntConversion_ClampToUint8:
1808 // Clamping clobbers the input register, so use a temp.
1809 moveDouble(src, temp);
1810 clampDoubleToUint8(temp, output);
1811 break;
1812 }
1813 }
1814
// Emit code converting the boxed value in |value| to an int32 in |output|,
// following |behavior|.  When |maybeInput| is provided, its statically-known
// possible types are used to skip emitting dead type checks.  Strings are
// only handled when the behavior truncates and both out-of-line string labels
// are provided: the string payload is unboxed into |stringReg|, control jumps
// to |handleStringEntry|, and rejoins at |handleStringRejoin| with the
// numeric result expected in |temp|.
void
MacroAssembler::convertValueToInt(ValueOperand value, MDefinition* maybeInput,
                                  Label* handleStringEntry, Label* handleStringRejoin,
                                  Label* truncateDoubleSlow,
                                  Register stringReg, FloatRegister temp, Register output,
                                  Label* fail, IntConversionBehavior behavior,
                                  IntConversionInputKind conversion)
{
    Register tag = splitTagForTest(value);
    bool handleStrings = (behavior == IntConversion_Truncate ||
                          behavior == IntConversion_ClampToUint8) &&
                         handleStringEntry &&
                         handleStringRejoin;

    MOZ_ASSERT_IF(handleStrings, conversion == IntConversion_Any);

    Label done, isInt32, isBool, isDouble, isNull, isString;

    // Dispatch on the value's tag; checks statically excluded by
    // |maybeInput| emit no code.
    branchEqualTypeIfNeeded(MIRType_Int32, maybeInput, tag, &isInt32);
    if (conversion == IntConversion_Any || conversion == IntConversion_NumbersOrBoolsOnly)
        branchEqualTypeIfNeeded(MIRType_Boolean, maybeInput, tag, &isBool);
    branchEqualTypeIfNeeded(MIRType_Double, maybeInput, tag, &isDouble);

    if (conversion == IntConversion_Any) {
        // If we are not truncating, we fail for anything that's not
        // null. Otherwise we might be able to handle strings and objects.
        switch (behavior) {
          case IntConversion_Normal:
          case IntConversion_NegativeZeroCheck:
            branchTestNull(Assembler::NotEqual, tag, fail);
            break;

          case IntConversion_Truncate:
          case IntConversion_ClampToUint8:
            branchEqualTypeIfNeeded(MIRType_Null, maybeInput, tag, &isNull);
            if (handleStrings)
                branchEqualTypeIfNeeded(MIRType_String, maybeInput, tag, &isString);
            branchEqualTypeIfNeeded(MIRType_Object, maybeInput, tag, fail);
            branchTestUndefined(Assembler::NotEqual, tag, fail);
            break;
        }
    } else {
        // Numbers-only (or numbers/bools-only) conversions fail for
        // every other input kind.
        jump(fail);
    }

    // The value is null or undefined in truncation contexts - just emit 0.
    // Note: this code is also reached by falling through the dispatch above
    // (undefined in the truncating case, null in the non-truncating case).
    if (isNull.used())
        bind(&isNull);
    mov(ImmWord(0), output);
    jump(&done);

    // Try converting a string into a double, then jump to the double case.
    if (handleStrings) {
        bind(&isString);
        unboxString(value, stringReg);
        jump(handleStringEntry);
    }

    // Try converting double into integer.
    if (isDouble.used() || handleStrings) {
        if (isDouble.used()) {
            bind(&isDouble);
            unboxDouble(value, temp);
        }

        // The out-of-line string path rejoins here with its numeric result
        // already in |temp|.
        if (handleStrings)
            bind(handleStringRejoin);

        convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
        jump(&done);
    }

    // Just unbox a bool, the result is 0 or 1.
    if (isBool.used()) {
        bind(&isBool);
        unboxBoolean(value, output);
        jump(&done);
    }

    // Integers can be unboxed.
    if (isInt32.used()) {
        bind(&isInt32);
        unboxInt32(value, output);
        if (behavior == IntConversion_ClampToUint8)
            clampIntToUint8(output);
    }

    bind(&done);
}
1904
1905 bool
convertValueToInt(JSContext * cx,const Value & v,Register output,Label * fail,IntConversionBehavior behavior)1906 MacroAssembler::convertValueToInt(JSContext* cx, const Value& v, Register output, Label* fail,
1907 IntConversionBehavior behavior)
1908 {
1909 bool handleStrings = (behavior == IntConversion_Truncate ||
1910 behavior == IntConversion_ClampToUint8);
1911
1912 if (v.isNumber() || (handleStrings && v.isString())) {
1913 double d;
1914 if (v.isNumber())
1915 d = v.toNumber();
1916 else if (!StringToNumber(cx, v.toString(), &d))
1917 return false;
1918
1919 switch (behavior) {
1920 case IntConversion_Normal:
1921 case IntConversion_NegativeZeroCheck: {
1922 // -0 is checked anyways if we have a constant value.
1923 int i;
1924 if (mozilla::NumberIsInt32(d, &i))
1925 move32(Imm32(i), output);
1926 else
1927 jump(fail);
1928 break;
1929 }
1930 case IntConversion_Truncate:
1931 move32(Imm32(ToInt32(d)), output);
1932 break;
1933 case IntConversion_ClampToUint8:
1934 move32(Imm32(ClampDoubleToUint8(d)), output);
1935 break;
1936 }
1937
1938 return true;
1939 }
1940
1941 if (v.isBoolean()) {
1942 move32(Imm32(v.toBoolean() ? 1 : 0), output);
1943 return true;
1944 }
1945
1946 if (v.isNull() || v.isUndefined()) {
1947 move32(Imm32(0), output);
1948 return true;
1949 }
1950
1951 MOZ_ASSERT(v.isObject() || v.isSymbol());
1952
1953 jump(fail);
1954 return true;
1955 }
1956
1957 bool
convertConstantOrRegisterToInt(JSContext * cx,ConstantOrRegister src,FloatRegister temp,Register output,Label * fail,IntConversionBehavior behavior)1958 MacroAssembler::convertConstantOrRegisterToInt(JSContext* cx, ConstantOrRegister src,
1959 FloatRegister temp, Register output,
1960 Label* fail, IntConversionBehavior behavior)
1961 {
1962 if (src.constant())
1963 return convertValueToInt(cx, src.value(), output, fail, behavior);
1964
1965 convertTypedOrValueToInt(src.reg(), temp, output, fail, behavior);
1966 return true;
1967 }
1968
// Convert the typed-or-boxed source to an int32 in |output|, following
// |behavior|.  |temp| holds float/double intermediates; inputs that cannot
// be converted inline jump to |fail|.
void
MacroAssembler::convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp,
                                         Register output, Label* fail,
                                         IntConversionBehavior behavior)
{
    // A boxed Value needs dynamic dispatch on its type tag.
    if (src.hasValue()) {
        convertValueToInt(src.valueReg(), temp, output, fail, behavior);
        return;
    }

    switch (src.type()) {
      case MIRType_Undefined:
      case MIRType_Null:
        move32(Imm32(0), output);
        break;
      case MIRType_Boolean:
      case MIRType_Int32:
        if (src.typedReg().gpr() != output)
            move32(src.typedReg().gpr(), output);
        // Booleans are already 0 or 1; only int32 inputs need clamping.
        if (src.type() == MIRType_Int32 && behavior == IntConversion_ClampToUint8)
            clampIntToUint8(output);
        break;
      case MIRType_Double:
        convertDoubleToInt(src.typedReg().fpu(), output, temp, nullptr, fail, behavior);
        break;
      case MIRType_Float32:
        // Conversion to Double simplifies implementation at the expense of performance.
        convertFloat32ToDouble(src.typedReg().fpu(), temp);
        convertDoubleToInt(temp, output, temp, nullptr, fail, behavior);
        break;
      case MIRType_String:
      case MIRType_Symbol:
      case MIRType_Object:
        // These require VM calls to convert; take the failure path.
        jump(fail);
        break;
      default:
        MOZ_CRASH("Bad MIRType");
    }
}
2008
// Append the code of |other| to this assembler.  After the platform-specific
// merge, branches in the appended code that targeted |other|'s out-of-line
// labels (sync interrupt, stack overflow, out-of-bounds, conversion error)
// are retargeted — offset by this assembler's pre-merge size — to the
// corresponding labels of this assembler.  Returns false on OOM.
bool
MacroAssembler::asmMergeWith(const MacroAssembler& other)
{
    size_t sizeBeforeMerge = size();

    if (!MacroAssemblerSpecific::asmMergeWith(other))
        return false;

    retargetWithOffset(sizeBeforeMerge, other.asmSyncInterruptLabel(), asmSyncInterruptLabel());
    retargetWithOffset(sizeBeforeMerge, other.asmStackOverflowLabel(), asmStackOverflowLabel());
    retargetWithOffset(sizeBeforeMerge, other.asmOnOutOfBoundsLabel(), asmOnOutOfBoundsLabel());
    retargetWithOffset(sizeBeforeMerge, other.asmOnConversionErrorLabel(), asmOnConversionErrorLabel());
    return true;
}
2023
// Finalize code generation: emit the shared failure path (only if some
// branch actually targeted it) before running the platform-specific
// finalization.
void
MacroAssembler::finish()
{
    if (failureLabel_.used()) {
        bind(&failureLabel_);
        handleFailure();
    }

    MacroAssemblerSpecific::finish();
}
2034
// Patch locations that must know the final JitCode address: the code's
// self-reference (if any) and all recorded profiler call sites.
void
MacroAssembler::link(JitCode* code)
{
    MOZ_ASSERT(!oom());
    linkSelfReference(code);
    linkProfilerCallSites(code);
}
2042
2043 void
branchIfNotInterpretedConstructor(Register fun,Register scratch,Label * label)2044 MacroAssembler::branchIfNotInterpretedConstructor(Register fun, Register scratch, Label* label)
2045 {
2046 // 16-bit loads are slow and unaligned 32-bit loads may be too so
2047 // perform an aligned 32-bit load and adjust the bitmask accordingly.
2048 MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
2049 MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
2050
2051 // First, ensure it's a scripted function.
2052 load32(Address(fun, JSFunction::offsetOfNargs()), scratch);
2053 int32_t bits = IMM32_16ADJ(JSFunction::INTERPRETED);
2054 branchTest32(Assembler::Zero, scratch, Imm32(bits), label);
2055
2056 // Check if the CONSTRUCTOR bit is set.
2057 bits = IMM32_16ADJ(JSFunction::CONSTRUCTOR);
2058 branchTest32(Assembler::Zero, scratch, Imm32(bits), label);
2059 }
2060
2061 void
branchEqualTypeIfNeeded(MIRType type,MDefinition * maybeDef,Register tag,Label * label)2062 MacroAssembler::branchEqualTypeIfNeeded(MIRType type, MDefinition* maybeDef, Register tag,
2063 Label* label)
2064 {
2065 if (!maybeDef || maybeDef->mightBeType(type)) {
2066 switch (type) {
2067 case MIRType_Null:
2068 branchTestNull(Equal, tag, label);
2069 break;
2070 case MIRType_Boolean:
2071 branchTestBoolean(Equal, tag, label);
2072 break;
2073 case MIRType_Int32:
2074 branchTestInt32(Equal, tag, label);
2075 break;
2076 case MIRType_Double:
2077 branchTestDouble(Equal, tag, label);
2078 break;
2079 case MIRType_String:
2080 branchTestString(Equal, tag, label);
2081 break;
2082 case MIRType_Symbol:
2083 branchTestSymbol(Equal, tag, label);
2084 break;
2085 case MIRType_Object:
2086 branchTestObject(Equal, tag, label);
2087 break;
2088 default:
2089 MOZ_CRASH("Unsupported type");
2090 }
2091 }
2092 }
2093
// Record the current code offset as the "last profiling call site" of the
// active JitActivation.  Emits nothing when profiling instrumentation is
// disabled on |masm|.  The -1 placeholder written here is replaced with the
// real code address later by linkProfilerCallSites().
MacroAssembler::AutoProfilerCallInstrumentation::AutoProfilerCallInstrumentation(
    MacroAssembler& masm
    MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
{
    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
    if (!masm.emitProfilingInstrumentation_)
        return;

    // Preserve the two call-temp registers clobbered below.
    Register reg = CallTempReg0;
    Register reg2 = CallTempReg1;
    masm.push(reg);
    masm.push(reg2);

    JitContext* icx = GetJitContext();
    AbsoluteAddress profilingActivation(icx->runtime->addressOfProfilingActivation());

    // Store a patchable code address into lastProfilingCallSite.
    CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
    masm.loadPtr(profilingActivation, reg2);
    masm.storePtr(reg, Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));

    // Remember the offset so link() can back-patch the real address.
    masm.appendProfilerCallSite(label);

    masm.pop(reg2);
    masm.pop(reg);
}
2119
2120 void
linkProfilerCallSites(JitCode * code)2121 MacroAssembler::linkProfilerCallSites(JitCode* code)
2122 {
2123 for (size_t i = 0; i < profilerCallSites_.length(); i++) {
2124 CodeOffset offset = profilerCallSites_[i];
2125 CodeLocationLabel location(code, offset);
2126 PatchDataWithValueCheck(location, ImmPtr(location.raw()), ImmPtr((void*)-1));
2127 }
2128 }
2129
// Dynamic-count counterpart of alignJitStackBasedOnNArgs(uint32_t): |nargs|
// is only known at runtime, so both the parity test and the alignment test
// are emitted as code.
void
MacroAssembler::alignJitStackBasedOnNArgs(Register nargs)
{
    if (JitStackValueAlignment == 1)
        return;

    // A JitFrameLayout is composed of the following:
    // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
    //
    // We want to ensure that the |raddr| address is aligned.
    // Which implies that we want to ensure that |this| is aligned.
    static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
                  "No need to consider the JitFrameLayout for aligning the stack");

    // Which implies that |argN| is aligned if |nargs| is even, and offset by
    // |sizeof(Value)| if |nargs| is odd.
    MOZ_ASSERT(JitStackValueAlignment == 2);

    // Thus the |padding| is offset by |sizeof(Value)| if |nargs| is even, and
    // aligned if |nargs| is odd.

    // if (nargs % 2 == 0) {
    //     if (sp % JitStackAlignment == 0)
    //         sp -= sizeof(Value);
    //     MOZ_ASSERT(sp % JitStackAlignment == JitStackAlignment - sizeof(Value));
    // } else {
    //     sp = sp & ~(JitStackAlignment - 1);
    // }
    Label odd, end;
    Label* maybeAssert = &end;
#ifdef DEBUG
    // In debug builds, route the already-offset even case through an
    // alignment assertion instead of skipping it.
    Label assert;
    maybeAssert = &assert;
#endif
    assertStackAlignment(sizeof(Value), 0);
    branchTestPtr(Assembler::NonZero, nargs, Imm32(1), &odd);
    branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), maybeAssert);
    subFromStackPtr(Imm32(sizeof(Value)));
#ifdef DEBUG
    bind(&assert);
#endif
    assertStackAlignment(JitStackAlignment, sizeof(Value));
    jump(&end);
    bind(&odd);
    andToStackPtr(Imm32(~(JitStackAlignment - 1)));
    bind(&end);
}
2177
2178 void
alignJitStackBasedOnNArgs(uint32_t nargs)2179 MacroAssembler::alignJitStackBasedOnNArgs(uint32_t nargs)
2180 {
2181 if (JitStackValueAlignment == 1)
2182 return;
2183
2184 // A JitFrameLayout is composed of the following:
2185 // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
2186 //
2187 // We want to ensure that the |raddr| address is aligned.
2188 // Which implies that we want to ensure that |this| is aligned.
2189 static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
2190 "No need to consider the JitFrameLayout for aligning the stack");
2191
2192 // Which implies that |argN| is aligned if |nargs| is even, and offset by
2193 // |sizeof(Value)| if |nargs| is odd.
2194 MOZ_ASSERT(JitStackValueAlignment == 2);
2195
2196 // Thus the |padding| is offset by |sizeof(Value)| if |nargs| is even, and
2197 // aligned if |nargs| is odd.
2198
2199 assertStackAlignment(sizeof(Value), 0);
2200 if (nargs % 2 == 0) {
2201 Label end;
2202 branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
2203 subFromStackPtr(Imm32(sizeof(Value)));
2204 bind(&end);
2205 assertStackAlignment(JitStackAlignment, sizeof(Value));
2206 } else {
2207 andToStackPtr(Imm32(~(JitStackAlignment - 1)));
2208 }
2209 }
2210
2211 // ===============================================================
2212
// IonMonkey compilation-context constructor: builds a standalone JitContext
// and allocator for this assembler.  When |ion| is given, the frame size is
// taken from the IonScript, and profiling instrumentation is enabled if SPS
// profiling is active (|pc| must be non-null for that).
MacroAssembler::MacroAssembler(JSContext* cx, IonScript* ion,
                               JSScript* script, jsbytecode* pc)
  : framePushed_(0),
#ifdef DEBUG
    inCall_(false),
#endif
    emitProfilingInstrumentation_(false)
{
    constructRoot(cx);
    jitContext_.emplace(cx, (js::jit::TempAllocator*)nullptr);
    alloc_.emplace(cx);
    moveResolver_.setAllocator(*jitContext_->temp);
#if defined(JS_CODEGEN_ARM)
    // ARM/ARM64 buffers need explicit initialization and a per-assembler id.
    initWithAllocator();
    m_buffer.id = GetJitContext()->getNextAssemblerId();
#elif defined(JS_CODEGEN_ARM64)
    initWithAllocator();
    armbuffer_.id = GetJitContext()->getNextAssemblerId();
#endif
    if (ion) {
        setFramePushed(ion->frameSize());
        if (pc && cx->runtime()->spsProfiler.enabled())
            enableProfilingInstrumentation();
    }
}
2238
// Save all live registers and align the frame for an IC out-of-line call.
// Returns a token recording the frame depth; hand it back to icRestoreLive().
MacroAssembler::AfterICSaveLive
MacroAssembler::icSaveLive(LiveRegisterSet& liveRegs)
{
    PushRegsInMask(liveRegs);
    AfterICSaveLive aic(framePushed());
    alignFrameForICArguments(aic);
    return aic;
}
2247
// Build a fake exit frame for an IC out-of-line call.  |aic| is unused on
// this path but kept for interface symmetry with icSaveLive/icRestoreLive —
// presumably some platform variants need it; confirm before removing.
bool
MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr, AfterICSaveLive& aic)
{
    return buildOOLFakeExitFrame(fakeReturnAddr);
}
2253
// Undo icSaveLive(): restore the frame alignment recorded in |aic|, check
// the frame depth matches, and pop the saved live registers.
void
MacroAssembler::icRestoreLive(LiveRegisterSet& liveRegs, AfterICSaveLive& aic)
{
    restoreFrameAlignmentForICArguments(aic);
    MOZ_ASSERT(framePushed() == aic.initialStack);
    PopRegsInMask(liveRegs);
}
2261
2262 //{{{ check_macroassembler_style
2263 // ===============================================================
2264 // Stack manipulation functions.
2265
2266 void
PushRegsInMask(LiveGeneralRegisterSet set)2267 MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set)
2268 {
2269 PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
2270 }
2271
2272 void
PopRegsInMask(LiveRegisterSet set)2273 MacroAssembler::PopRegsInMask(LiveRegisterSet set)
2274 {
2275 PopRegsInMaskIgnore(set, LiveRegisterSet());
2276 }
2277
2278 void
PopRegsInMask(LiveGeneralRegisterSet set)2279 MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set)
2280 {
2281 PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
2282 }
2283
2284 void
Push(jsid id,Register scratchReg)2285 MacroAssembler::Push(jsid id, Register scratchReg)
2286 {
2287 if (JSID_IS_GCTHING(id)) {
2288 // If we're pushing a gcthing, then we can't just push the tagged jsid
2289 // value since the GC won't have any idea that the push instruction
2290 // carries a reference to a gcthing. Need to unpack the pointer,
2291 // push it using ImmGCPtr, and then rematerialize the id at runtime.
2292
2293 if (JSID_IS_STRING(id)) {
2294 JSString* str = JSID_TO_STRING(id);
2295 MOZ_ASSERT(((size_t)str & JSID_TYPE_MASK) == 0);
2296 MOZ_ASSERT(JSID_TYPE_STRING == 0x0);
2297 Push(ImmGCPtr(str));
2298 } else {
2299 MOZ_ASSERT(JSID_IS_SYMBOL(id));
2300 JS::Symbol* sym = JSID_TO_SYMBOL(id);
2301 movePtr(ImmGCPtr(sym), scratchReg);
2302 orPtr(Imm32(JSID_TYPE_SYMBOL), scratchReg);
2303 Push(scratchReg);
2304 }
2305 } else {
2306 Push(ImmWord(JSID_BITS(id)));
2307 }
2308 }
2309
2310 void
Push(TypedOrValueRegister v)2311 MacroAssembler::Push(TypedOrValueRegister v)
2312 {
2313 if (v.hasValue()) {
2314 Push(v.valueReg());
2315 } else if (IsFloatingPointType(v.type())) {
2316 FloatRegister reg = v.typedReg().fpu();
2317 if (v.type() == MIRType_Float32) {
2318 convertFloat32ToDouble(reg, ScratchDoubleReg);
2319 reg = ScratchDoubleReg;
2320 }
2321 Push(reg);
2322 } else {
2323 Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
2324 }
2325 }
2326
2327 void
Push(ConstantOrRegister v)2328 MacroAssembler::Push(ConstantOrRegister v)
2329 {
2330 if (v.constant())
2331 Push(v.value());
2332 else
2333 Push(v.reg());
2334 }
2335
// Push a boxed value register and account for it in framePushed_.
void
MacroAssembler::Push(const ValueOperand& val)
{
    pushValue(val);
    framePushed_ += sizeof(Value);
}
2342
// Push a constant Value and account for it in framePushed_.
void
MacroAssembler::Push(const Value& val)
{
    pushValue(val);
    framePushed_ += sizeof(Value);
}
2349
// Box the payload in |reg| with tag |type| and push it, accounting for it
// in framePushed_.
void
MacroAssembler::Push(JSValueType type, Register reg)
{
    pushValue(type, reg);
    framePushed_ += sizeof(Value);
}
2356
// Push the Value stored at |addr|, accounting for it in framePushed_.
// The address must not be stack-pointer based, since the push itself moves
// the stack pointer.
void
MacroAssembler::PushValue(const Address& addr)
{
    MOZ_ASSERT(addr.base != getStackPointer());
    pushValue(addr);
    framePushed_ += sizeof(Value);
}
2364
2365 void
PushEmptyRooted(VMFunction::RootType rootType)2366 MacroAssembler::PushEmptyRooted(VMFunction::RootType rootType)
2367 {
2368 switch (rootType) {
2369 case VMFunction::RootNone:
2370 MOZ_CRASH("Handle must have root type");
2371 case VMFunction::RootObject:
2372 case VMFunction::RootString:
2373 case VMFunction::RootPropertyName:
2374 case VMFunction::RootFunction:
2375 case VMFunction::RootCell:
2376 Push(ImmPtr(nullptr));
2377 break;
2378 case VMFunction::RootValue:
2379 Push(UndefinedValue());
2380 break;
2381 }
2382 }
2383
2384 void
popRooted(VMFunction::RootType rootType,Register cellReg,const ValueOperand & valueReg)2385 MacroAssembler::popRooted(VMFunction::RootType rootType, Register cellReg,
2386 const ValueOperand& valueReg)
2387 {
2388 switch (rootType) {
2389 case VMFunction::RootNone:
2390 MOZ_CRASH("Handle must have root type");
2391 case VMFunction::RootObject:
2392 case VMFunction::RootString:
2393 case VMFunction::RootPropertyName:
2394 case VMFunction::RootFunction:
2395 case VMFunction::RootCell:
2396 Pop(cellReg);
2397 break;
2398 case VMFunction::RootValue:
2399 Pop(valueReg);
2400 break;
2401 }
2402 }
2403
2404 void
adjustStack(int amount)2405 MacroAssembler::adjustStack(int amount)
2406 {
2407 if (amount > 0)
2408 freeStack(amount);
2409 else if (amount < 0)
2410 reserveStack(-amount);
2411 }
2412
2413 void
freeStack(uint32_t amount)2414 MacroAssembler::freeStack(uint32_t amount)
2415 {
2416 MOZ_ASSERT(amount <= framePushed_);
2417 if (amount)
2418 addToStackPtr(Imm32(amount));
2419 framePushed_ -= amount;
2420 }
2421
// Release a dynamically-sized amount of stack.  Note: unlike the immediate
// overload, this cannot update framePushed_; presumably callers do their own
// frame bookkeeping — verify at each call site.
void
MacroAssembler::freeStack(Register amount)
{
    addToStackPtr(amount);
}
2427
2428 // ===============================================================
2429 // ABI function calls.
2430
// Common setup shared by all ABI call entry points: reset the debug in-call
// flag, the simulator-only call signature, and the ABI argument generator,
// then apply per-platform calling-convention configuration.
void
MacroAssembler::setupABICall()
{
#ifdef DEBUG
    MOZ_ASSERT(!inCall_);
    inCall_ = true;
#endif

#ifdef JS_SIMULATOR
    signature_ = 0;
#endif

    // Reinitialize the ABIArg generator.
    abiArgs_ = ABIArgGenerator();

#if defined(JS_CODEGEN_ARM)
    // On ARM, we need to know what ABI we are using, either in the
    // simulator, or based on the configure flags.
#if defined(JS_SIMULATOR_ARM)
    abiArgs_.setUseHardFp(UseHardFpABI());
#elif defined(JS_CODEGEN_ARM_HARDFP)
    abiArgs_.setUseHardFp(true);
#else
    abiArgs_.setUseHardFp(false);
#endif
#endif

#if defined(JS_CODEGEN_MIPS32)
    // On MIPS, the system ABI use general registers pairs to encode double
    // arguments, after one or 2 integer-like arguments. Unfortunately, the
    // Lowering phase is not capable to express it at the moment. So we enforce
    // the system ABI here.
    abiArgs_.enforceO32ABI();
#endif
}
2466
// Begin an ABI call sequence on a path where the stack is already known to
// be ABI-aligned, so no dynamic re-alignment code is emitted.
void
MacroAssembler::setupAlignedABICall()
{
    setupABICall();
    dynamicAlignment_ = false;
    assertStackAlignment(ABIStackAlignment);

#if defined(JS_CODEGEN_ARM64)
    // arm64 builds must always use the dynamically-aligned entry point.
    MOZ_CRASH("Not supported on arm64");
#endif
}
2478
2479 void
passABIArg(const MoveOperand & from,MoveOp::Type type)2480 MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type)
2481 {
2482 MOZ_ASSERT(inCall_);
2483 appendSignatureType(type);
2484
2485 ABIArg arg;
2486 switch (type) {
2487 case MoveOp::FLOAT32:
2488 arg = abiArgs_.next(MIRType_Float32);
2489 break;
2490 case MoveOp::DOUBLE:
2491 arg = abiArgs_.next(MIRType_Double);
2492 break;
2493 case MoveOp::GENERAL:
2494 arg = abiArgs_.next(MIRType_Pointer);
2495 break;
2496 default:
2497 MOZ_CRASH("Unexpected argument type");
2498 }
2499
2500 MoveOperand to(*this, arg);
2501 if (from == to)
2502 return;
2503
2504 if (!enoughMemory_)
2505 return;
2506 enoughMemory_ = moveResolver_.addMove(from, to, type);
2507 }
2508
// Perform a C++ ABI call to the native function |fun| without profiler
// instrumentation.
void
MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result)
{
    appendSignatureType(result);
#ifdef JS_SIMULATOR
    // Under the simulator, native calls go through a redirection trampoline
    // keyed by the call signature.
    fun = Simulator::RedirectNativeFunction(fun, signature());
#endif

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(ImmPtr(fun));
    callWithABIPost(stackAdjust, result);
}
2522
// ABI call to an asm.js/wasm symbolic builtin address, resolved at link
// time; uses the asm.js variant of the pre-call stack adjustment.
void
MacroAssembler::callWithABINoProfiler(wasm::SymbolicAddress imm, MoveOp::Type result)
{
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust, /* callFromAsmJS = */ true);
    call(imm);
    callWithABIPost(stackAdjust, result);
}
2531
2532 // ===============================================================
2533 // Exit frame footer.
2534
// Publish the current stack pointer as the runtime's jitTop, marking the
// topmost exit frame for stack walking.
void
MacroAssembler::linkExitFrame()
{
    AbsoluteAddress jitTop(GetJitContext()->runtime->addressOfJitTop());
    storeStackPtr(jitTop);
}
2541
void
MacroAssembler::linkSelfReference(JitCode* code)
{
    // If this code can transition to C++ code and witness a GC, then we need to store
    // the JitCode onto the stack in order to GC it correctly. exitCodePatch should
    // be unset if the code never needed to push its JitCode*.
    if (hasSelfReference()) {
        // Verify and replace the -1 placeholder written at compile time with
        // the final code pointer.
        PatchDataWithValueCheck(CodeLocationLabel(code, selfReferencePatch_),
                                ImmPtr(code),
                                ImmPtr((void*)-1));
    }
}
2554
2555 //}}} check_macroassembler_style
2556
2557 namespace js {
2558 namespace jit {
2559
#ifdef DEBUG
// Debug-only register-scope tracker: registering |reg| in the assembler's
// tracked set lets overlapping live scopes for the same register be caught.
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg)
  : RegisterType(reg), masm_(masm)
{
    masm.debugTrackedRegisters_.add(reg);
}

// Explicit instantiations for the two register kinds used by the JITs.
template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(MacroAssembler& masm, Register reg);
template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(MacroAssembler& masm, FloatRegister reg);
#endif // DEBUG
2571
#ifdef DEBUG
// Releases the register claimed by the constructor.  The dynamic_cast here
// is an upcast to the RegisterType base class, so it is resolved statically
// and needs no RTTI.
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope()
{
    const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
    masm_.debugTrackedRegisters_.take(reg);
}

template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();
#endif // DEBUG
2583
2584 } // namespace jit
2585 } // namespace js
2586