1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/IonCacheIRCompiler.h"
8 #include "mozilla/DebugOnly.h"
9 #include "mozilla/Maybe.h"
10
11 #include <algorithm>
12
13 #include "jit/ABIFunctions.h"
14 #include "jit/BaselineIC.h"
15 #include "jit/CacheIRCompiler.h"
16 #include "jit/IonIC.h"
17 #include "jit/JitFrames.h"
18 #include "jit/JitZone.h"
19 #include "jit/JSJitFrameIter.h"
20 #include "jit/Linker.h"
21 #include "jit/SharedICHelpers.h"
22 #include "jit/VMFunctions.h"
23 #include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
24 #include "proxy/DeadObjectProxy.h"
25 #include "proxy/Proxy.h"
26 #include "util/Memory.h"
27
28 #include "jit/ABIFunctionList-inl.h"
29 #include "jit/JSJitFrameIter-inl.h"
30 #include "jit/MacroAssembler-inl.h"
31 #include "jit/VMFunctionList-inl.h"
32 #include "vm/Realm-inl.h"
33
34 using namespace js;
35 using namespace js::jit;
36
37 using mozilla::DebugOnly;
38 using mozilla::Maybe;
39
40 using JS::ExpandoAndGeneration;
41
42 namespace js {
43 namespace jit {
44
// IonCacheIRCompiler compiles CacheIR to IonIC native code.
//
// Stub fields are read with StubFieldPolicy::Constant: the values are baked
// directly into the generated code rather than loaded from stub data at
// runtime.
IonCacheIRCompiler::IonCacheIRCompiler(JSContext* cx,
                                       const CacheIRWriter& writer, IonIC* ic,
                                       IonScript* ionScript,
                                       uint32_t stubDataOffset)
    : CacheIRCompiler(cx, writer, stubDataOffset, Mode::Ion,
                      StubFieldPolicy::Constant),
      writer_(writer),
      ic_(ic),
      ionScript_(ionScript),
      savedLiveRegs_(false) {
  // The IC and its IonScript must outlive this compiler.
  MOZ_ASSERT(ic_);
  MOZ_ASSERT(ionScript_);
}
59
// Reads a pointer-sized raw value out of the stub data at |offset|.
// The C-style cast is deliberate: T may be either a pointer type or a
// pointer-sized integral type, and no single named cast covers both.
template <typename T>
T IonCacheIRCompiler::rawPointerStubField(uint32_t offset) {
  static_assert(sizeof(T) == sizeof(uintptr_t), "T must have pointer size");
  return (T)readStubWord(offset, StubField::Type::RawPointer);
}
65
66 template <typename T>
rawInt64StubField(uint32_t offset)67 T IonCacheIRCompiler::rawInt64StubField(uint32_t offset) {
68 static_assert(sizeof(T) == sizeof(int64_t), "T musthave int64 size");
69 return (T)readStubInt64(offset, StubField::Type::RawInt64);
70 }
71
// Emits a call to the VM function |fn|. The VMFunctionId is resolved at
// compile time from the function's type and address, then dispatched through
// the shared callVMInternal path.
template <typename Fn, Fn fn>
void IonCacheIRCompiler::callVM(MacroAssembler& masm) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(masm, id);
}
77
// Pushes a placeholder ((void*)-1) for this stub's JitCode* and records the
// patch offset. The real pointer is patched in by compile() once the code has
// been linked.
void IonCacheIRCompiler::pushStubCodePointer() {
  stubJitCodeOffset_.emplace(masm.PushWithPatch(ImmPtr((void*)-1)));
}
81
// AutoSaveLiveRegisters must be used when we make a call that can GC. The
// constructor ensures all live registers are stored on the stack (where the GC
// expects them) and the destructor restores these registers.
AutoSaveLiveRegisters::AutoSaveLiveRegisters(IonCacheIRCompiler& compiler)
    : compiler_(compiler) {
  // liveRegs_ is filled in by IonCacheIRCompiler::init() for every supported
  // CacheKind, so it must be present by the time a GC-capable call is made.
  MOZ_ASSERT(compiler_.liveRegs_.isSome());
  MOZ_ASSERT(compiler_.ic_);
  compiler_.allocator.saveIonLiveRegisters(
      compiler_.masm, compiler_.liveRegs_.ref(),
      compiler_.ic_->scratchRegisterForEntryJump(), compiler_.ionScript_);
  // Record that registers were saved; the destructor asserts on state that
  // depends on this having happened.
  compiler_.savedLiveRegs_ = true;
}
// Restores the registers saved by the constructor and checks that the frame
// is back to the Ion script's expected size.
AutoSaveLiveRegisters::~AutoSaveLiveRegisters() {
  MOZ_ASSERT(compiler_.stubJitCodeOffset_.isSome(),
             "Must have pushed JitCode* pointer");
  compiler_.allocator.restoreIonLiveRegisters(compiler_.masm,
                                              compiler_.liveRegs_.ref());
  // After popping the saved registers the frame must match the Ion frame
  // size again; anything else means push/pop got unbalanced.
  MOZ_ASSERT(compiler_.masm.framePushed() == compiler_.ionScript_->frameSize());
}
101
102 } // namespace jit
103 } // namespace js
104
// Saves all Ion live registers on the stack before a call that can GC.
//
// This is complicated by the CacheIR register allocator: live registers may
// currently hold IC operands, and IC operands may already be spilled into the
// stack region where the live registers must be stored. The function proceeds
// in small numbered steps (below) to untangle that. On return, the live
// registers occupy the bottom of the IC's stack area, masm.framePushed()
// includes them, and the allocator's availability sets are updated so only
// input registers are considered occupied.
void CacheRegisterAllocator::saveIonLiveRegisters(MacroAssembler& masm,
                                                  LiveRegisterSet liveRegs,
                                                  Register scratch,
                                                  IonScript* ionScript) {
  // We have to push all registers in liveRegs on the stack. It's possible we
  // stored other values in our live registers and stored operands on the
  // stack (where our live registers should go), so this requires some careful
  // work. Try to keep it simple by taking one small step at a time.

  // Step 1. Discard any dead operands so we can reuse their registers.
  freeDeadOperandLocations(masm);

  // Step 2. Figure out the size of our live regs. This is consistent with
  // the fact that we're using storeRegsInMask to generate the save code and
  // PopRegsInMask to generate the restore code.
  size_t sizeOfLiveRegsInBytes = masm.PushRegsInMaskSizeInBytes(liveRegs);

  MOZ_ASSERT(sizeOfLiveRegsInBytes > 0);

  // Step 3. Ensure all non-input operands are on the stack.
  size_t numInputs = writer_.numInputOperands();
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.isInRegister()) {
      spillOperandToStack(masm, &loc);
    }
  }

  // Step 4. Restore the register state, but don't discard the stack as
  // non-input operands are stored there.
  restoreInputState(masm, /* shouldDiscardStack = */ false);

  // We just restored the input state, so no input operands should be stored
  // on the stack.
#ifdef DEBUG
  for (size_t i = 0; i < numInputs; i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(!loc.isOnStack());
  }
#endif

  // Step 5. At this point our register state is correct. Stack values,
  // however, may cover the space where we have to store the live registers.
  // Move them out of the way.

  bool hasOperandOnStack = false;
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (!loc.isOnStack()) {
      continue;
    }

    hasOperandOnStack = true;

    size_t operandSize = loc.stackSizeInBytes();
    size_t operandStackPushed = loc.stackPushed();
    MOZ_ASSERT(operandSize > 0);
    MOZ_ASSERT(stackPushed_ >= operandStackPushed);
    MOZ_ASSERT(operandStackPushed >= operandSize);

    // If this operand doesn't cover the live register space, there's
    // nothing to do.
    if (operandStackPushed - operandSize >= sizeOfLiveRegsInBytes) {
      MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
      continue;
    }

    // Reserve stack space for the live registers if needed.
    if (sizeOfLiveRegsInBytes > stackPushed_) {
      size_t extraBytes = sizeOfLiveRegsInBytes - stackPushed_;
      MOZ_ASSERT((extraBytes % sizeof(uintptr_t)) == 0);
      masm.subFromStackPtr(Imm32(extraBytes));
      stackPushed_ += extraBytes;
    }

    // Push the operand below the live register space.
    if (loc.kind() == OperandLocation::PayloadStack) {
      masm.push(
          Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
      stackPushed_ += operandSize;
      loc.setPayloadStack(stackPushed_, loc.payloadType());
      continue;
    }
    MOZ_ASSERT(loc.kind() == OperandLocation::ValueStack);
    masm.pushValue(
        Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
    stackPushed_ += operandSize;
    loc.setValueStack(stackPushed_);
  }

  // Step 6. If we have any operands on the stack, adjust their stackPushed
  // values to not include sizeOfLiveRegsInBytes (this simplifies code down
  // the line). Then push/store the live registers.
  if (hasOperandOnStack) {
    MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
    stackPushed_ -= sizeOfLiveRegsInBytes;

    for (size_t i = numInputs; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.isOnStack()) {
        loc.adjustStackPushed(-int32_t(sizeOfLiveRegsInBytes));
      }
    }

    size_t stackBottom = stackPushed_ + sizeOfLiveRegsInBytes;
    masm.storeRegsInMask(liveRegs, Address(masm.getStackPointer(), stackBottom),
                         scratch);
    masm.setFramePushed(masm.framePushed() + sizeOfLiveRegsInBytes);
  } else {
    // If no operands are on the stack, discard the unused stack space.
    if (stackPushed_ > 0) {
      masm.addToStackPtr(Imm32(stackPushed_));
      stackPushed_ = 0;
    }
    masm.PushRegsInMask(liveRegs);
  }
  // Spilled-slot free lists refer to stack layout that just changed; reset.
  freePayloadSlots_.clear();
  freeValueSlots_.clear();

  MOZ_ASSERT(masm.framePushed() ==
             ionScript->frameSize() + sizeOfLiveRegsInBytes);

  // Step 7. All live registers and non-input operands are stored on the stack
  // now, so at this point all registers except for the input registers are
  // available.
  availableRegs_.set() = GeneralRegisterSet::Not(inputRegisterSet());
  availableRegsAfterSpill_.set() = GeneralRegisterSet();

  // Step 8. We restored our input state, so we have to fix up aliased input
  // registers again.
  fixupAliasedInputs(masm);
}
237
// Restores the live registers saved by saveIonLiveRegisters and resets the
// allocator's availability sets: after the restore, no general registers are
// freely available, but all of them may be obtained by spilling.
void CacheRegisterAllocator::restoreIonLiveRegisters(MacroAssembler& masm,
                                                     LiveRegisterSet liveRegs) {
  masm.PopRegsInMask(liveRegs);

  availableRegs_.set() = GeneralRegisterSet();
  availableRegsAfterSpill_.set() = GeneralRegisterSet::All();
}
245
GetReturnAddressToIonCode(JSContext * cx)246 static void* GetReturnAddressToIonCode(JSContext* cx) {
247 JSJitFrameIter frame(cx->activation()->asJit());
248 MOZ_ASSERT(frame.type() == FrameType::Exit,
249 "An exit frame is expected as update functions are called with a "
250 "VMFunction.");
251
252 void* returnAddr = frame.returnAddress();
253 #ifdef DEBUG
254 ++frame;
255 MOZ_ASSERT(frame.isIonJS());
256 #endif
257 return returnAddr;
258 }
259
// Builds the IonICCallFrameLayout header (stub code pointer, frame
// descriptor, return address) so a VM call can be made from IC code. The
// AutoSaveLiveRegisters parameter is used to ensure registers were saved
// before the frame is constructed; it is otherwise unused.
void IonCacheIRCompiler::prepareVMCall(MacroAssembler& masm,
                                       const AutoSaveLiveRegisters&) {
  uint32_t descriptor = MakeFrameDescriptor(
      masm.framePushed(), FrameType::IonJS, IonICCallFrameLayout::Size());
  // Placeholder JitCode*; patched after linking.
  pushStubCodePointer();
  masm.Push(Imm32(descriptor));
  masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

  preparedForVMCall_ = true;
}
271
// Initializes the compiler for this IC's kind: sets up the CacheIR register
// allocator, makes the IC's output/temp registers available for allocation,
// records the IC's live registers and (typed-or-value) output, and tells the
// allocator where each input operand currently lives. Returns false on OOM.
bool IonCacheIRCompiler::init() {
  if (!allocator.init()) {
    return false;
  }

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(numInputs == NumInputsForCacheKind(ic_->kind()));

  // Registers the IC reserved for us (its output and temps); each case below
  // adds the registers that kind provides.
  AllocatableGeneralRegisterSet available;

  switch (ic_->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem: {
      IonGetPropertyIC* ic = ic_->asGetPropertyIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      // GetProp has one input (value); GetElem adds the id.
      MOZ_ASSERT(numInputs == 1 || numInputs == 2);

      allocator.initInputLocation(0, ic->value());
      if (numInputs > 1) {
        allocator.initInputLocation(1, ic->id());
      }
      break;
    }
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper: {
      IonGetPropSuperIC* ic = ic_->asGetPropSuperIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 2 || numInputs == 3);

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::GetPropSuper) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->receiver());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->receiver());
      }
      break;
    }
    case CacheKind::SetProp:
    case CacheKind::SetElem: {
      IonSetPropertyIC* ic = ic_->asSetPropertyIC();

      // Sets have no output; only the IC's temp register is available.
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::SetProp) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->rhs());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->rhs());
      }
      break;
    }
    case CacheKind::GetName: {
      IonGetNameIC* ic = ic_->asGetNameIC();
      ValueOperand output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::BindName: {
      IonBindNameIC* ic = ic_->asBindNameIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      // BindName produces a typed (object) result in a single register.
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::GetIterator: {
      IonGetIteratorIC* ic = ic_->asGetIteratorIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp1());
      available.add(ic->temp2());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::OptimizeSpreadCall: {
      auto* ic = ic_->asOptimizeSpreadCallIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::In: {
      IonInIC* ic = ic_->asInIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->key());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->object())));
      break;
    }
    case CacheKind::HasOwn: {
      IonHasOwnIC* ic = ic_->asHasOwnIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->id());
      allocator.initInputLocation(1, ic->value());
      break;
    }
    case CacheKind::CheckPrivateField: {
      IonCheckPrivateFieldIC* ic = ic_->asCheckPrivateFieldIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->value());
      allocator.initInputLocation(1, ic->id());
      break;
    }
    case CacheKind::InstanceOf: {
      IonInstanceOfIC* ic = ic_->asInstanceOfIC();
      Register output = ic->output();
      available.add(output);
      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->rhs())));
      break;
    }
    case CacheKind::ToPropertyKey: {
      IonToPropertyKeyIC* ic = ic_->asToPropertyKeyIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->input());
      break;
    }
    case CacheKind::UnaryArith: {
      IonUnaryArithIC* ic = ic_->asUnaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->input());
      break;
    }
    case CacheKind::BinaryArith: {
      IonBinaryArithIC* ic = ic_->asBinaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::Compare: {
      IonCompareIC* ic = ic_->asCompareIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewArray:
    case CacheKind::NewObject:
      // These kinds never generate Ion ICs.
      MOZ_CRASH("Unsupported IC");
  }

  // Float registers live across the IC are derived from the general set.
  liveFloatRegs_ = LiveFloatRegisterSet(liveRegs_->fpus());

  allocator.initAvailableRegs(available);
  allocator.initAvailableRegsAfterSpill();
  return true;
}
535
// Compiles the CacheIR ops to native code and links them into a new JitCode
// object for |stub|. Emits the main IC body first, then one block per
// failure path; each failure path ends with a patchable jump to the next
// stub in the chain. After linking, patches in the next-code pointers and
// this stub's own JitCode*. Returns nullptr on OOM (after recovery).
JitCode* IonCacheIRCompiler::compile(IonICStub* stub) {
  // Start from the Ion script's frame size so stack offsets match the
  // caller's frame.
  masm.setFramePushed(ionScript_->frameSize());
  if (cx_->runtime()->geckoProfiler().enabled()) {
    masm.enableProfilingInstrumentation();
  }

  allocator.fixupAliasedInputs(masm);

  // Dispatch each CacheIR op to its emit method; the table is generated from
  // CACHE_IR_OPS.
  CacheIRReader reader(writer_);
  do {
    switch (reader.readOp()) {
#define DEFINE_OP(op, ...) \
  case CacheOp::op: \
    if (!emit##op(reader)) return nullptr; \
    break;
      CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

      default:
        MOZ_CRASH("Invalid op");
    }
    allocator.nextOp();
  } while (reader.more());

  // A successful IC must have jumped to the rejoin point; falling through
  // the op stream is a bug.
  masm.assumeUnreachable("Should have returned from IC");

  // Done emitting the main IC code. Now emit the failure paths.
  for (size_t i = 0; i < failurePaths.length(); i++) {
    if (!emitFailurePath(i)) {
      return nullptr;
    }
    // Load a (to-be-patched) pointer into the entry-jump scratch register
    // and jump through it to the next stub / fallback.
    Register scratch = ic_->scratchRegisterForEntryJump();
    CodeOffset offset = masm.movWithPatch(ImmWord(-1), scratch);
    masm.jump(Address(scratch, 0));
    if (!nextCodeOffsets_.append(offset)) {
      return nullptr;
    }
  }

  Linker linker(masm);
  Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Ion));
  if (!newStubCode) {
    cx_->recoverFromOutOfMemory();
    return nullptr;
  }

  // Patch the failure-path jumps to go through the stub's next-code slot.
  for (CodeOffset offset : nextCodeOffsets_) {
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, offset),
                                       ImmPtr(stub->nextCodeRawPtr()),
                                       ImmPtr((void*)-1));
  }
  // Patch the stub's own JitCode* pushed by pushStubCodePointer(), if any.
  if (stubJitCodeOffset_) {
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
        ImmPtr(newStubCode.get()), ImmPtr((void*)-1));
  }

  return newStubCode;
}
595
#ifdef DEBUG
// Debug-only check that |reg| is a float register this IC kind is allowed to
// use. Which float registers (if any) an IC may clobber is determined by the
// LIRGenerator visit function that created it.
void IonCacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
  switch (ic_->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem:
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper:
    case CacheKind::GetName:
    case CacheKind::BindName:
    case CacheKind::GetIterator:
    case CacheKind::In:
    case CacheKind::HasOwn:
    case CacheKind::CheckPrivateField:
    case CacheKind::InstanceOf:
    case CacheKind::UnaryArith:
    case CacheKind::ToPropertyKey:
    case CacheKind::OptimizeSpreadCall:
      MOZ_CRASH("No float registers available");
    case CacheKind::SetProp:
    case CacheKind::SetElem:
      // FloatReg0 is available per LIRGenerator::visitSetPropertyCache.
      MOZ_ASSERT(reg == FloatReg0);
      break;
    case CacheKind::BinaryArith:
    case CacheKind::Compare:
      // FloatReg0 and FloatReg1 are available per
      // LIRGenerator::visitBinaryCache.
      MOZ_ASSERT(reg == FloatReg0 || reg == FloatReg1);
      break;
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewArray:
    case CacheKind::NewObject:
      MOZ_CRASH("Unsupported IC");
  }
}
#endif
635
// Guards that the object's shape equals the shape baked into the stub.
// Jumps to the failure path on mismatch. A scratch register is only
// allocated when Spectre mitigations are needed, since the mitigated
// branch also re-writes |obj| to a safe value on misspeculation.
bool IonCacheIRCompiler::emitGuardShape(ObjOperandId objId,
                                        uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Shape* shape = shapeStubField(shapeOffset);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Maybe<AutoScratchRegister> maybeScratch;
  if (needSpectreMitigations) {
    maybeScratch.emplace(allocator, masm);
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (needSpectreMitigations) {
    masm.branchTestObjShape(Assembler::NotEqual, obj, shape, *maybeScratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, shape,
                                                failure->label());
  }

  return true;
}
664
emitGuardProto(ObjOperandId objId,uint32_t protoOffset)665 bool IonCacheIRCompiler::emitGuardProto(ObjOperandId objId,
666 uint32_t protoOffset) {
667 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
668 Register obj = allocator.useRegister(masm, objId);
669 JSObject* proto = objectStubField(protoOffset);
670
671 AutoScratchRegister scratch(allocator, masm);
672
673 FailurePath* failure;
674 if (!addFailurePath(&failure)) {
675 return false;
676 }
677
678 masm.loadObjProto(obj, scratch);
679 masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(proto),
680 failure->label());
681 return true;
682 }
683
// Guards that the object belongs to the expected compartment. First checks
// that the recorded global wrapper has not been nuked (its handler replaced
// by the dead-object proxy's), since the compartment check is only
// meaningful while the wrapper is alive. Fails the stub in either case.
bool IonCacheIRCompiler::emitGuardCompartment(ObjOperandId objId,
                                              uint32_t globalOffset,
                                              uint32_t compartmentOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  JSObject* globalWrapper = objectStubField(globalOffset);
  JS::Compartment* compartment = compartmentStubField(compartmentOffset);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Verify that the global wrapper is still valid, as
  // it is pre-requisite for doing the compartment check.
  masm.movePtr(ImmGCPtr(globalWrapper), scratch);
  Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::Equal, handlerAddr,
                 ImmPtr(&DeadObjectProxy::singleton), failure->label());

  masm.branchTestObjCompartment(Assembler::NotEqual, obj, compartment, scratch,
                                failure->label());
  return true;
}
709
// Guards that the object has exactly the JSClass recorded in the stub data;
// jumps to the failure path on mismatch. Uses the Spectre-mitigated class
// check (which also sanitizes |obj| on misspeculation) when required.
bool IonCacheIRCompiler::emitGuardAnyClass(ObjOperandId objId,
                                           uint32_t claspOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  const JSClass* clasp = classStubField(claspOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                scratch, failure->label());
  }

  return true;
}
733
emitGuardHasProxyHandler(ObjOperandId objId,uint32_t handlerOffset)734 bool IonCacheIRCompiler::emitGuardHasProxyHandler(ObjOperandId objId,
735 uint32_t handlerOffset) {
736 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
737 Register obj = allocator.useRegister(masm, objId);
738 const void* handler = proxyHandlerStubField(handlerOffset);
739
740 FailurePath* failure;
741 if (!addFailurePath(&failure)) {
742 return false;
743 }
744
745 Address handlerAddr(obj, ProxyObject::offsetOfHandler());
746 masm.branchPtr(Assembler::NotEqual, handlerAddr, ImmPtr(handler),
747 failure->label());
748 return true;
749 }
750
emitGuardSpecificObject(ObjOperandId objId,uint32_t expectedOffset)751 bool IonCacheIRCompiler::emitGuardSpecificObject(ObjOperandId objId,
752 uint32_t expectedOffset) {
753 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
754 Register obj = allocator.useRegister(masm, objId);
755 JSObject* expected = objectStubField(expectedOffset);
756
757 FailurePath* failure;
758 if (!addFailurePath(&failure)) {
759 return false;
760 }
761
762 masm.branchPtr(Assembler::NotEqual, obj, ImmGCPtr(expected),
763 failure->label());
764 return true;
765 }
766
// Guarding a specific function in Ion reduces to an identity check on the
// function object; the nargs/flags stub field is not needed here, so this
// simply forwards to emitGuardSpecificObject.
bool IonCacheIRCompiler::emitGuardSpecificFunction(
    ObjOperandId objId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
  return emitGuardSpecificObject(objId, expectedOffset);
}
771
// Guards that the string operand equals the atom recorded in the stub data.
// guardSpecificAtom may call out to C++ for a content comparison when the
// string is not the same pointer, so all volatile registers (minus the
// scratch) must be preserved across it.
bool IonCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
                                               uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  AutoScratchRegister scratch(allocator, masm);

  JSAtom* atom = &stringStubField(expectedOffset)->asAtom();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  // The scratch register is clobbered anyway; no need to save it.
  volatileRegs.takeUnchecked(scratch);

  masm.guardSpecificAtom(str, atom, scratch, volatileRegs, failure->label());
  return true;
}
792
emitGuardSpecificSymbol(SymbolOperandId symId,uint32_t expectedOffset)793 bool IonCacheIRCompiler::emitGuardSpecificSymbol(SymbolOperandId symId,
794 uint32_t expectedOffset) {
795 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
796 Register sym = allocator.useRegister(masm, symId);
797 JS::Symbol* expected = symbolStubField(expectedOffset);
798
799 FailurePath* failure;
800 if (!addFailurePath(&failure)) {
801 return false;
802 }
803
804 masm.branchPtr(Assembler::NotEqual, sym, ImmGCPtr(expected),
805 failure->label());
806 return true;
807 }
808
// LoadValueResult is only generated for Baseline stubs; reaching it from an
// Ion IC is a CacheIR emitter bug.
bool IonCacheIRCompiler::emitLoadValueResult(uint32_t valOffset) {
  MOZ_CRASH("Baseline-specific op");
}
812
emitLoadFixedSlotResult(ObjOperandId objId,uint32_t offsetOffset)813 bool IonCacheIRCompiler::emitLoadFixedSlotResult(ObjOperandId objId,
814 uint32_t offsetOffset) {
815 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
816 AutoOutputRegister output(*this);
817 Register obj = allocator.useRegister(masm, objId);
818 int32_t offset = int32StubField(offsetOffset);
819 masm.loadTypedOrValue(Address(obj, offset), output);
820 return true;
821 }
822
// Typed fixed-slot loads are never compiled by Ion ICs.
// NOTE(review): the crash message refers to call ICs; presumably this op is
// only emitted on call-IC paths that Ion does not take — confirm against the
// CacheIR emitters before relying on the message text.
bool IonCacheIRCompiler::emitLoadFixedSlotTypedResult(ObjOperandId objId,
                                                      uint32_t offsetOffset,
                                                      ValueType) {
  MOZ_CRASH("Call ICs not used in ion");
}
828
emitLoadDynamicSlotResult(ObjOperandId objId,uint32_t offsetOffset)829 bool IonCacheIRCompiler::emitLoadDynamicSlotResult(ObjOperandId objId,
830 uint32_t offsetOffset) {
831 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
832 AutoOutputRegister output(*this);
833 Register obj = allocator.useRegister(masm, objId);
834 int32_t offset = int32StubField(offsetOffset);
835
836 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
837 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
838 masm.loadTypedOrValue(Address(scratch, offset), output);
839 return true;
840 }
841
// Calls a scripted getter and stores its return value in the IC output.
//
// Builds an IonICCallFrameLayout followed by a JitFrameLayout (padding,
// |this| + undefined-filled argument Values, argc, callee, descriptor) so
// the callee sees a normal JIT call frame, then calls the getter through its
// JIT entry. Switches realms around the call when the getter lives in a
// different realm. Live registers are saved up front since the call can GC.
bool IonCacheIRCompiler::emitCallScriptedGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  JSFunction* target = &objectStubField(getterOffset)->as<JSFunction>();
  AutoScratchRegister scratch(allocator, masm);

  // The sameRealm flag was recorded when the stub was attached; it must
  // still agree with the actual realms.
  MOZ_ASSERT(sameRealm == (cx_->realm() == target->realm()));

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  // Construct IonICCallFrameLayout.
  uint32_t descriptor = MakeFrameDescriptor(
      masm.framePushed(), FrameType::IonJS, IonICCallFrameLayout::Size());
  pushStubCodePointer();
  masm.Push(Imm32(descriptor));
  masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  uint32_t argSize = (target->nargs() + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  // Getters are called with no actual args; fill the formals with undefined.
  for (size_t i = 0; i < target->nargs(); i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(receiver);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  masm.movePtr(ImmGCPtr(target), scratch);

  descriptor = MakeFrameDescriptor(argSize + padding, FrameType::IonICCall,
                                   JitFrameLayout::Size());
  masm.Push(Imm32(0));  // argc
  masm.Push(scratch);
  masm.Push(Imm32(descriptor));

  // Check stack alignment. Add sizeof(uintptr_t) for the return address.
  MOZ_ASSERT(((masm.framePushed() + sizeof(uintptr_t)) % JitStackAlignment) ==
             0);

  MOZ_ASSERT(target->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  if (!sameRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  masm.storeCallResultValue(output);
  // Tear down everything pushed since framePushedBefore.
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}
912
// Inlined (trial-inlining) getter calls carry an ICScript and are only
// supported in Baseline; Ion ICs must never emit this op.
bool IonCacheIRCompiler::emitCallInlinedGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, uint32_t icScriptOffset,
    bool sameRealm, uint32_t nargsAndFlagsOffset) {
  MOZ_CRASH("Trial inlining not supported in Ion");
}
918
// Call a native (C++) getter function on |receiver| and store the result in
// the IC's output register. Builds an IonOOLNative fake exit frame so the
// native can GC/throw, performs the realm switch if needed, and invokes the
// native through the JSNative ABI.
bool IonCacheIRCompiler::emitCallNativeGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  JSFunction* target = &objectStubField(getterOffset)->as<JSFunction>();
  MOZ_ASSERT(target->isNativeFun());

  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegister argUintN(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Construct vp array:
  // Push receiver value for |this|
  masm.Push(receiver);
  // Push callee/outparam.
  masm.Push(ObjectValue(*target));

  // Preload arguments into registers.
  masm.loadJSContext(argJSContext);
  // A getter is called with zero actual arguments.
  masm.move32(Imm32(0), argUintN);
  // vp points at the callee/outparam slot just pushed.
  masm.moveStackPtrTo(argVp.get());

  // Push marking data for later use.
  masm.Push(argUintN);
  pushStubCodePointer();

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  // Construct and execute call.
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argUintN);
  masm.passABIArg(argVp);
  masm.callWithABI(DynamicFunction<JSNative>(target->native()), MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  if (!sameRealm) {
    // ReturnReg is free as a scratch here: the result is in the outparam.
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  Address outparam(masm.getStackPointer(),
                   IonOOLNativeExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  // Spectre mitigation for speculative execution after the C++ call.
  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  // Pop the exit frame and the vp array (0 extra argument slots).
  masm.adjustStack(IonOOLNativeExitFrameLayout::Size(0));
  return true;
}
994
// Invoke a DOM getter through the generic CallDOMGetter VM function, passing
// the JSJitInfo baked into the stub, and store the result in the IC output.
bool IonCacheIRCompiler::emitCallDOMGetterResult(ObjOperandId objId,
                                                 uint32_t jitInfoOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);

  // The JSJitInfo is a raw (non-GC) pointer stored in the stub data.
  const JSJitInfo* info = rawPointerStubField<const JSJitInfo*>(jitInfoOffset);

  allocator.discardStack(masm);
  prepareVMCall(masm, save);

  // VM-call arguments are pushed last-to-first.
  masm.Push(obj);
  masm.Push(ImmPtr(info));

  using Fn =
      bool (*)(JSContext*, const JSJitInfo*, HandleObject, MutableHandleValue);
  callVM<Fn, jit::CallDOMGetter>(masm);

  masm.storeCallResultValue(output);
  return true;
}
1018
// Invoke a DOM setter through the generic CallDOMSetter VM function with the
// stub's JSJitInfo. No IC output: setters produce no value.
bool IonCacheIRCompiler::emitCallDOMSetter(ObjOperandId objId,
                                           uint32_t jitInfoOffset,
                                           ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  // The JSJitInfo is a raw (non-GC) pointer stored in the stub data.
  const JSJitInfo* info = rawPointerStubField<const JSJitInfo*>(jitInfoOffset);

  allocator.discardStack(masm);
  prepareVMCall(masm, save);

  // VM-call arguments are pushed last-to-first.
  masm.Push(val);
  masm.Push(obj);
  masm.Push(ImmPtr(info));

  using Fn = bool (*)(JSContext*, const JSJitInfo*, HandleObject, HandleValue);
  callVM<Fn, jit::CallDOMSetter>(masm);
  return true;
}
1041
// Get a property from a proxy by calling ProxyGetProperty via an ABI call.
// The args are pushed on the stack first (so we can pass stack addresses as
// Handles) inside an IonOOLProxy fake exit frame.
bool IonCacheIRCompiler::emitProxyGetResult(ObjOperandId objId,
                                            uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  jsid id = idStubField(idOffset);

  // ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
  //                  MutableHandleValue vp)
  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegister argProxy(allocator, masm);
  AutoScratchRegister argId(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Push stubCode for marking.
  pushStubCodePointer();

  // Push args on stack first so we can take pointers to make handles.
  // vp outparam slot, initialized to undefined.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(argVp.get());

  masm.Push(id, scratch);
  masm.moveStackPtrTo(argId.get());

  // Push the proxy. Also used as receiver.
  masm.Push(obj);
  masm.moveStackPtrTo(argProxy.get());

  masm.loadJSContext(argJSContext);

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLProxy);

  // Make the call.
  using Fn = bool (*)(JSContext * cx, HandleObject proxy, HandleId id,
                      MutableHandleValue vp);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argProxy);
  masm.passABIArg(argId);
  masm.passABIArg(argVp);
  masm.callWithABI<Fn, ProxyGetProperty>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Load the outparam vp[0] into output register(s).
  Address outparam(masm.getStackPointer(),
                   IonOOLProxyExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  // Spectre mitigation in case of speculative execution within C++ code.
  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  // masm.leaveExitFrame & pop locals
  masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
  return true;
}
1110
// Reading the constructing flag off the caller frame only makes sense for
// Baseline ICs; this op must not appear in Ion CacheIR.
bool IonCacheIRCompiler::emitFrameIsConstructingResult() {
  MOZ_CRASH("Baseline-specific op");
}
1114
// Load a fixed slot from an environment object into the IC output, bailing
// to the failure path if the slot holds an uninitialized-lexical magic value.
bool IonCacheIRCompiler::emitLoadEnvironmentFixedSlotResult(
    ObjOperandId objId, uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  // Byte offset of the fixed slot, baked into the stub data.
  int32_t offset = int32StubField(offsetOffset);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Check for uninitialized lexicals.
  Address slot(obj, offset);
  masm.branchTestMagic(Assembler::Equal, slot, failure->label());

  // Load the value.
  masm.loadTypedOrValue(slot, output);
  return true;
}
1135
// Load a dynamic slot from an environment object into the IC output, going
// through the object's out-of-line slots pointer. Fails on the
// uninitialized-lexical magic value.
bool IonCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult(
    ObjOperandId objId, uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  // Byte offset into the dynamic slots array, baked into the stub data.
  int32_t offset = int32StubField(offsetOffset);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load the dynamic slots pointer.
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);

  // Check for uninitialized lexicals.
  Address slot(scratch, offset);
  masm.branchTestMagic(Assembler::Equal, slot, failure->label());

  // Load the value.
  masm.loadTypedOrValue(slot, output);
  return true;
}
1159
// Constant-string results are only emitted for Baseline stubs; Ion CacheIR
// never contains this op.
bool IonCacheIRCompiler::emitLoadConstantStringResult(uint32_t strOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  MOZ_CRASH("not used in ion");
}
1164
// Compare two strings with the relational/equality op |op|, producing a
// boolean in the typed output register. Fast path: inline compareStrings;
// slow path: VM call to the matching StringsEqual/StringsCompare function.
bool IonCacheIRCompiler::emitCompareStringResult(JSOp op, StringOperandId lhsId,
                                                 StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  allocator.discardStack(masm);

  Label slow, done;
  // String comparisons always produce a typed boolean, never a boxed Value.
  MOZ_ASSERT(!output.hasValue());
  masm.compareStrings(op, left, right, output.typedReg().gpr(), &slow);

  masm.jump(&done);
  masm.bind(&slow);

  prepareVMCall(masm, save);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(left);
    masm.Push(right);
  } else {
    masm.Push(right);
    masm.Push(left);
  }

  // Dispatch to the VM function matching the (possibly swapped) comparison.
  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    callVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(masm);
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    callVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(masm);
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    callVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(masm);
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    callVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(masm);
  }

  masm.storeCallBoolResult(output.typedReg().gpr());
  masm.bind(&done);
  return true;
}
1212
// Store |rhs| into a fixed slot of |obj|, with a GC pre-barrier on the old
// value and a post-barrier for a possible tenured->nursery edge.
bool IonCacheIRCompiler::emitStoreFixedSlot(ObjOperandId objId,
                                            uint32_t offsetOffset,
                                            ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  // Byte offset of the fixed slot, baked into the stub data.
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);

  Address slot(obj, offset);
  // Pre-barrier must run before the old value is overwritten.
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeConstantOrRegister(val, slot);
  emitPostBarrierSlot(obj, val, scratch);
  return true;
}
1228
// Store |rhs| into a dynamic (out-of-line) slot of |obj|, with pre- and
// post-write barriers as in emitStoreFixedSlot.
bool IonCacheIRCompiler::emitStoreDynamicSlot(ObjOperandId objId,
                                              uint32_t offsetOffset,
                                              ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  // Byte offset into the dynamic slots array, baked into the stub data.
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);

  // Load the dynamic slots pointer; scratch is reused by the post-barrier
  // afterwards, which is fine because the slot address is no longer needed.
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
  Address slot(scratch, offset);
  // Pre-barrier must run before the old value is overwritten.
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeConstantOrRegister(val, slot);
  emitPostBarrierSlot(obj, val, scratch);
  return true;
}
1245
// Shared implementation for the three add-property-and-store ops:
//  - AddAndStoreFixedSlot: store into a fixed slot.
//  - AddAndStoreDynamicSlot: store into an existing dynamic slot.
//  - AllocateAndStoreDynamicSlot: grow the dynamic slots first, then store.
// In all cases the object's shape is updated to |newShape| before the store.
// |numNewSlotsOffset| is only meaningful for AllocateAndStoreDynamicSlot.
bool IonCacheIRCompiler::emitAddAndStoreSlotShared(
    CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset, Maybe<uint32_t> numNewSlotsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  int32_t offset = int32StubField(offsetOffset);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch1(allocator, masm);

  // A second scratch is only needed to hold the slot count argument for the
  // growSlotsPure call below.
  Maybe<AutoScratchRegister> scratch2;
  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    scratch2.emplace(allocator, masm);
  }

  Shape* newShape = shapeStubField(newShapeOffset);

  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    // We have to (re)allocate dynamic slots. Do this first, as it's the
    // only fallible operation here. Note that growSlotsPure is
    // fallible but does not GC.

    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    int32_t numNewSlots = int32StubField(*numNewSlotsOffset);
    MOZ_ASSERT(numNewSlots > 0);

    // growSlotsPure is a C++ call, so save all volatile registers around it.
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = bool (*)(JSContext * cx, NativeObject * obj, uint32_t newCount);
    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.move32(Imm32(numNewSlots), scratch2.ref());
    masm.passABIArg(scratch2.ref());
    masm.callWithABI<Fn, NativeObject::growSlotsPure>();
    masm.mov(ReturnReg, scratch1);

    // Restore everything except scratch1, which carries the success flag.
    LiveRegisterSet ignore;
    ignore.add(scratch1);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.branchIfFalseBool(scratch1, failure->label());
  }

  // Update the object's shape.
  masm.storeObjShape(newShape, obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  if (op == CacheOp::AddAndStoreFixedSlot) {
    Address slot(obj, offset);
    masm.storeConstantOrRegister(val, slot);
  } else {
    MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
               op == CacheOp::AllocateAndStoreDynamicSlot);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
    Address slot(scratch1, offset);
    masm.storeConstantOrRegister(val, slot);
  }

  emitPostBarrierSlot(obj, val, scratch1);

  return true;
}
1320
emitAddAndStoreFixedSlot(ObjOperandId objId,uint32_t offsetOffset,ValOperandId rhsId,uint32_t newShapeOffset)1321 bool IonCacheIRCompiler::emitAddAndStoreFixedSlot(ObjOperandId objId,
1322 uint32_t offsetOffset,
1323 ValOperandId rhsId,
1324 uint32_t newShapeOffset) {
1325 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1326 Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
1327 return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot, objId,
1328 offsetOffset, rhsId, newShapeOffset,
1329 numNewSlotsOffset);
1330 }
1331
emitAddAndStoreDynamicSlot(ObjOperandId objId,uint32_t offsetOffset,ValOperandId rhsId,uint32_t newShapeOffset)1332 bool IonCacheIRCompiler::emitAddAndStoreDynamicSlot(ObjOperandId objId,
1333 uint32_t offsetOffset,
1334 ValOperandId rhsId,
1335 uint32_t newShapeOffset) {
1336 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1337 Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
1338 return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot, objId,
1339 offsetOffset, rhsId, newShapeOffset,
1340 numNewSlotsOffset);
1341 }
1342
emitAllocateAndStoreDynamicSlot(ObjOperandId objId,uint32_t offsetOffset,ValOperandId rhsId,uint32_t newShapeOffset,uint32_t numNewSlotsOffset)1343 bool IonCacheIRCompiler::emitAllocateAndStoreDynamicSlot(
1344 ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
1345 uint32_t newShapeOffset, uint32_t numNewSlotsOffset) {
1346 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1347 return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot, objId,
1348 offsetOffset, rhsId, newShapeOffset,
1349 mozilla::Some(numNewSlotsOffset));
1350 }
1351
// Load the character of |str| at |index| as a string result. For code units
// below the static-strings limit we look up the interned unit string
// directly; otherwise we fall back to an ABI call that may fail (GC-free).
bool IonCacheIRCompiler::emitLoadStringCharResult(StringOperandId strId,
                                                  Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check, load string char.
  masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                            scratch1, failure->label());
  masm.loadStringChar(str, index, scratch1, scratch2, failure->label());

  // Load StaticString for this char. For larger code units perform a VM call.
  Label vmCall;
  masm.boundsCheck32PowerOfTwo(scratch1, StaticStrings::UNIT_STATIC_LIMIT,
                               &vmCall);
  masm.movePtr(ImmPtr(&cx_->staticStrings().unitStaticTable), scratch2);
  masm.loadPtr(BaseIndex(scratch2, scratch1, ScalePointer), scratch2);

  Label done;
  masm.jump(&done);

  {
    masm.bind(&vmCall);

    // FailurePath and AutoSaveLiveRegisters don't get along very well. Both are
    // modifying the stack and expect that no other stack manipulations are
    // made. Therefore we need to use an ABI call instead of a VM call here.

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    // Keep the char code (scratch1), result (scratch2) and output registers
    // out of the save set so they survive / carry results across the call.
    volatileRegs.takeUnchecked(scratch1);
    volatileRegs.takeUnchecked(scratch2);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSContext * cx, int32_t code);
    masm.setupUnalignedABICall(scratch2);
    masm.loadJSContext(scratch2);
    masm.passABIArg(scratch2);
    masm.passABIArg(scratch1);
    masm.callWithABI<Fn, jit::StringFromCharCodeNoGC>();
    masm.storeCallPointerResult(scratch2);

    masm.PopRegsInMask(volatileRegs);

    // The NoGC variant returns nullptr on failure; take the IC failure path.
    masm.branchPtr(Assembler::Equal, scratch2, ImmWord(0), failure->label());
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_STRING, scratch2, output.valueReg());
  return true;
}
1412
// Call a native (C++) setter on |receiver| with |rhs| as its single argument.
// Mirrors emitCallNativeGetterResult, but with argc == 1 and no output value.
bool IonCacheIRCompiler::emitCallNativeSetter(ObjOperandId receiverId,
                                              uint32_t setterOffset,
                                              ValOperandId rhsId,
                                              bool sameRealm,
                                              uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register receiver = allocator.useRegister(masm, receiverId);
  JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
  MOZ_ASSERT(target->isNativeFun());
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister argJSContext(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister argUintN(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Set up the call:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // vp[0] is callee/outparam
  // vp[1] is |this|
  // vp[2] is the value

  // Build vp and move the base into argVpReg.
  masm.Push(val);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));
  masm.Push(ObjectValue(*target));
  masm.moveStackPtrTo(argVp.get());

  // Preload other regs.
  masm.loadJSContext(argJSContext);
  // A setter is called with exactly one actual argument (the new value).
  masm.move32(Imm32(1), argUintN);

  // Push marking data for later use.
  masm.Push(argUintN);
  pushStubCodePointer();

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  // Make the call.
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argUintN);
  masm.passABIArg(argVp);
  masm.callWithABI(DynamicFunction<JSNative>(target->native()), MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Pop the exit frame and the vp array (1 argument slot, no result load:
  // setters produce no value).
  masm.adjustStack(IonOOLNativeExitFrameLayout::Size(1));
  return true;
}
1480
// Call a scripted (JIT-compiled) setter on |receiver| with |rhs| as the
// single argument. Builds an IonICCall frame, pads the stack so the pushed
// JitFrameLayout is JitStackAlignment-aligned, and calls through the
// target's JIT entry. The setter's return value is ignored.
bool IonCacheIRCompiler::emitCallScriptedSetter(ObjOperandId receiverId,
                                                uint32_t setterOffset,
                                                ValOperandId rhsId,
                                                bool sameRealm,
                                                uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register receiver = allocator.useRegister(masm, receiverId);
  JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  MOZ_ASSERT(sameRealm == (cx_->realm() == target->realm()));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  // Construct IonICCallFrameLayout.
  uint32_t descriptor = MakeFrameDescriptor(
      masm.framePushed(), FrameType::IonJS, IonICCallFrameLayout::Size());
  pushStubCodePointer();
  masm.Push(Imm32(descriptor));
  masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  size_t numArgs = std::max<size_t>(1, target->nargs());
  uint32_t argSize = (numArgs + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  // Pad formals beyond the first with undefined; slot 0 gets |rhs| below.
  for (size_t i = 1; i < target->nargs(); i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(val);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));

  if (!sameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  masm.movePtr(ImmGCPtr(target), scratch);

  descriptor = MakeFrameDescriptor(argSize + padding, FrameType::IonICCall,
                                   JitFrameLayout::Size());
  masm.Push(Imm32(1));  // argc
  masm.Push(scratch);
  masm.Push(Imm32(descriptor));

  // Check stack alignment. Add sizeof(uintptr_t) for the return address.
  MOZ_ASSERT(((masm.framePushed() + sizeof(uintptr_t)) % JitStackAlignment) ==
             0);

  MOZ_ASSERT(target->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  if (!sameRealm) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Tear down everything pushed since framePushedBefore; the setter's
  // return value is not stored anywhere.
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}
1552
// Inlined (trial-inlined) setter calls are a Baseline/Warp-only mechanism;
// CacheIR containing this op must never reach the Ion IC compiler.
bool IonCacheIRCompiler::emitCallInlinedSetter(
    ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
    uint32_t icScriptOffset, bool sameRealm, uint32_t nargsAndFlagsOffset) {
  MOZ_CRASH("Trial inlining not supported in Ion");
}
1558
// Set an array's |length| property via the SetArrayLength VM function.
// |strict| selects strict-mode semantics (throw vs. silently fail).
bool IonCacheIRCompiler::emitCallSetArrayLength(ObjOperandId objId, bool strict,
                                                ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  prepareVMCall(masm, save);

  // VM-call arguments are pushed last-to-first.
  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(masm);
  return true;
}
1578
// Set a property with a constant id on a proxy via the ProxySetProperty VM
// function.
bool IonCacheIRCompiler::emitProxySet(ObjOperandId objId, uint32_t idOffset,
                                      ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  jsid id = idStubField(idOffset);

  // Needed to materialize the jsid for the Push below.
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);
  prepareVMCall(masm, save);

  // VM-call arguments are pushed last-to-first.
  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(id, scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(masm);
  return true;
}
1602
// Set a property with a dynamic (Value) id on a proxy via the
// ProxySetPropertyByValue VM function.
bool IonCacheIRCompiler::emitProxySetByValue(ObjOperandId objId,
                                             ValOperandId idId,
                                             ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, idId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  prepareVMCall(masm, save);

  // VM-call arguments are pushed last-to-first.
  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, ProxySetPropertyByValue>(masm);
  return true;
}
1625
// Add or update a sparse element of an array via the
// AddOrUpdateSparseElementHelper VM function. |id| is an int32 index.
bool IonCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper(
    ObjOperandId objId, Int32OperandId idId, ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  Register id = allocator.useRegister(masm, idId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  allocator.discardStack(masm);
  prepareVMCall(masm, save);

  // VM-call arguments are pushed last-to-first.
  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(id);
  masm.Push(obj);

  using Fn = bool (*)(JSContext * cx, HandleArrayObject obj, int32_t int_id,
                      HandleValue v, bool strict);
  callVM<Fn, AddOrUpdateSparseElementHelper>(masm);
  return true;
}
1648
// Generic (megamorphic) element set: defer to SetObjectElementWithReceiver,
// passing |obj| both as the target object and, boxed, as the receiver.
bool IonCacheIRCompiler::emitMegamorphicSetElement(ObjOperandId objId,
                                                   ValOperandId idId,
                                                   ValOperandId rhsId,
                                                   bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister idVal = allocator.useConstantOrRegister(masm, idId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  allocator.discardStack(masm);
  prepareVMCall(masm, save);

  // VM-call arguments are pushed last-to-first; the boxed receiver is the
  // last formal parameter before |strict|.
  masm.Push(Imm32(strict));
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      HandleValue, bool);
  callVM<Fn, SetObjectElementWithReceiver>(masm);
  return true;
}
1674
emitReturnFromIC()1675 bool IonCacheIRCompiler::emitReturnFromIC() {
1676 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1677 if (!savedLiveRegs_) {
1678 allocator.restoreInputState(masm);
1679 }
1680
1681 uint8_t* rejoinAddr = ic_->rejoinAddr(ionScript_);
1682 masm.jump(ImmPtr(rejoinAddr));
1683 return true;
1684 }
1685
// Reuse a cached PropertyIteratorObject for for-in iteration over |obj|:
// guard the NativeIterator is reusable, mark it active, point it at |obj|
// (with the required write barriers) and link it into the realm's active
// enumerators list. Defines |resultId| as the iterator object.
bool IonCacheIRCompiler::emitGuardAndGetIterator(ObjOperandId objId,
                                                 uint32_t iterOffset,
                                                 uint32_t enumeratorsAddrOffset,
                                                 ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister niScratch(allocator, masm);

  PropertyIteratorObject* iterobj =
      &objectStubField(iterOffset)->as<PropertyIteratorObject>();
  // Address of the realm's active-enumerators list head, baked into the stub.
  NativeIterator** enumerators =
      rawPointerStubField<NativeIterator**>(enumeratorsAddrOffset);

  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load our PropertyIteratorObject* and its NativeIterator.
  masm.movePtr(ImmGCPtr(iterobj), output);
  masm.loadObjPrivate(output, PropertyIteratorObject::NUM_FIXED_SLOTS,
                      niScratch);

  // Ensure the iterator is reusable: see NativeIterator::isReusable.
  masm.branchIfNativeIteratorNotReusable(niScratch, failure->label());

  // Pre-write barrier for store to 'objectBeingIterated_'.
  Address iterObjAddr(niScratch, NativeIterator::offsetOfObjectBeingIterated());
  EmitPreBarrier(masm, iterObjAddr, MIRType::Object);

  // Mark iterator as active.
  Address iterFlagsAddr(niScratch, NativeIterator::offsetOfFlagsAndCount());
  masm.storePtr(obj, iterObjAddr);
  masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

  // Post-write barrier for stores to 'objectBeingIterated_'.
  emitPostBarrierSlot(output,
                      TypedOrValueRegister(MIRType::Object, AnyRegister(obj)),
                      scratch1);

  // Chain onto the active iterator stack.
  masm.loadPtr(AbsoluteAddress(enumerators), scratch1);
  emitRegisterEnumerator(scratch1, niScratch, scratch2);

  return true;
}
1737
// Guard that a DOM proxy's expando value is either absent (undefined) or an
// object with the expected shape; any other shape takes the failure path.
bool IonCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape(
    ValOperandId expandoId, uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand val = allocator.useValueRegister(masm, expandoId);
  Shape* shape = shapeStubField(shapeOffset);

  AutoScratchRegister objScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Missing expando (undefined) always passes the guard.
  Label done;
  masm.branchTestUndefined(Assembler::Equal, val, &done);

  masm.debugAssertIsObject(val);
  masm.unboxObject(val, objScratch);
  // The expando object is not used in this case, so we don't need Spectre
  // mitigations.
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
                                              shape, failure->label());

  masm.bind(&done);
  return true;
}
1764
// Load a DOM proxy's expando value while guarding that the proxy's
// ExpandoAndGeneration pointer and generation counter still match the
// values recorded when the stub was attached; mismatch fails the stub.
bool IonCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration(
    ObjOperandId objId, uint32_t expandoAndGenerationOffset,
    uint32_t generationOffset, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  // Both fields are raw (non-GC) data baked into the stub.
  ExpandoAndGeneration* expandoAndGeneration =
      rawPointerStubField<ExpandoAndGeneration*>(expandoAndGenerationOffset);
  uint64_t generation = rawInt64StubField<uint64_t>(generationOffset);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadDOMExpandoValueGuardGeneration(obj, output, expandoAndGeneration,
                                          generation, failure->label());
  return true;
}
1785
attachCacheIRStub(JSContext * cx,const CacheIRWriter & writer,CacheKind kind,IonScript * ionScript,bool * attached)1786 void IonIC::attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
1787 CacheKind kind, IonScript* ionScript,
1788 bool* attached) {
1789 // We shouldn't GC or report OOM (or any other exception) here.
1790 AutoAssertNoPendingException aanpe(cx);
1791 JS::AutoCheckCannotGC nogc;
1792
1793 MOZ_ASSERT(!*attached);
1794
1795 // Do nothing if the IR generator failed or triggered a GC that invalidated
1796 // the script.
1797 if (writer.failed() || ionScript->invalidated()) {
1798 return;
1799 }
1800
1801 JitZone* jitZone = cx->zone()->jitZone();
1802
1803 constexpr uint32_t stubDataOffset = sizeof(IonICStub);
1804 static_assert(stubDataOffset % sizeof(uint64_t) == 0,
1805 "Stub fields must be aligned");
1806
1807 // Try to reuse a previously-allocated CacheIRStubInfo.
1808 CacheIRStubKey::Lookup lookup(kind, ICStubEngine::IonIC, writer.codeStart(),
1809 writer.codeLength());
1810 CacheIRStubInfo* stubInfo = jitZone->getIonCacheIRStubInfo(lookup);
1811 if (!stubInfo) {
1812 // Allocate the shared CacheIRStubInfo. Note that the
1813 // putIonCacheIRStubInfo call below will transfer ownership to
1814 // the stub info HashSet, so we don't have to worry about freeing
1815 // it below.
1816
1817 // For Ion ICs, we don't track/use the makesGCCalls flag, so just pass true.
1818 bool makesGCCalls = true;
1819 stubInfo = CacheIRStubInfo::New(kind, ICStubEngine::IonIC, makesGCCalls,
1820 stubDataOffset, writer);
1821 if (!stubInfo) {
1822 return;
1823 }
1824
1825 CacheIRStubKey key(stubInfo);
1826 if (!jitZone->putIonCacheIRStubInfo(lookup, key)) {
1827 return;
1828 }
1829 }
1830
1831 MOZ_ASSERT(stubInfo);
1832
1833 // Ensure we don't attach duplicate stubs. This can happen if a stub failed
1834 // for some reason and the IR generator doesn't check for exactly the same
1835 // conditions.
1836 for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
1837 if (stub->stubInfo() != stubInfo) {
1838 continue;
1839 }
1840 if (!writer.stubDataEquals(stub->stubDataStart())) {
1841 continue;
1842 }
1843 return;
1844 }
1845
1846 size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
1847
1848 // Allocate the IonICStub in the optimized stub space. Ion stubs and
1849 // CacheIRStubInfo instances for Ion stubs can be purged on GC. That's okay
1850 // because the stub code is rooted separately when we make a VM call, and
1851 // stub code should never access the IonICStub after making a VM call. The
1852 // IonICStub::poison method poisons the stub to catch bugs in this area.
1853 ICStubSpace* stubSpace = cx->zone()->jitZone()->optimizedStubSpace();
1854 void* newStubMem = stubSpace->alloc(bytesNeeded);
1855 if (!newStubMem) {
1856 return;
1857 }
1858
1859 IonICStub* newStub =
1860 new (newStubMem) IonICStub(fallbackAddr(ionScript), stubInfo);
1861 writer.copyStubData(newStub->stubDataStart());
1862
1863 JitContext jctx(cx, nullptr);
1864 IonCacheIRCompiler compiler(cx, writer, this, ionScript, stubDataOffset);
1865 if (!compiler.init()) {
1866 return;
1867 }
1868
1869 JitCode* code = compiler.compile(newStub);
1870 if (!code) {
1871 return;
1872 }
1873
1874 attachStub(newStub, code);
1875 *attached = true;
1876 }
1877
// Emit a VM call to DoConcatStringObject to concatenate a string and an
// object (in either operand order); the resulting value is written to the
// IC's output register.
bool IonCacheIRCompiler::emitCallStringObjectConcatResult(ValOperandId lhsId,
                                                          ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);

  allocator.discardStack(masm);

  // VM-call arguments are pushed in reverse order: rhs first, then lhs.
  prepareVMCall(masm, save);
  masm.Push(rhs);
  masm.Push(lhs);

  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, MutableHandleValue);
  callVM<Fn, DoConcatStringObject>(masm);

  masm.storeCallResultValue(output);
  return true;
}
1899
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitGuardFunctionScript(ObjOperandId funId,
                                                 uint32_t expectedOffset,
                                                 uint32_t nargsAndFlagsOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}
1905
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitCallScriptedFunction(ObjOperandId calleeId,
                                                  Int32OperandId argcId,
                                                  CallFlags flags) {
  MOZ_CRASH("Call ICs not used in ion");
}
1911
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitCallWasmFunction(ObjOperandId calleeId,
                                              Int32OperandId argcId,
                                              CallFlags flags,
                                              uint32_t funcExportOffset,
                                              uint32_t instanceOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}
1919
// The native/DOM call ops have different signatures depending on whether the
// build targets the JS simulator (which needs the raw target for redirection),
// but both variants are unreachable here: call ICs are not used in Ion.
#ifdef JS_SIMULATOR
bool IonCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
                                                Int32OperandId argcId,
                                                CallFlags flags,
                                                uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallDOMFunction(ObjOperandId calleeId,
                                             Int32OperandId argcId,
                                             ObjOperandId thisObjId,
                                             CallFlags flags,
                                             uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}
#else
bool IonCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
                                                Int32OperandId argcId,
                                                CallFlags flags,
                                                bool ignoresReturnValue) {
  MOZ_CRASH("Call ICs not used in ion");
}

bool IonCacheIRCompiler::emitCallDOMFunction(ObjOperandId calleeId,
                                             Int32OperandId argcId,
                                             ObjOperandId thisObjId,
                                             CallFlags flags) {
  MOZ_CRASH("Call ICs not used in ion");
}
#endif
1950
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitCallClassHook(ObjOperandId calleeId,
                                           Int32OperandId argcId,
                                           CallFlags flags,
                                           uint32_t targetOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}
1957
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitCallInlinedFunction(ObjOperandId calleeId,
                                                 Int32OperandId argcId,
                                                 uint32_t icScriptOffset,
                                                 CallFlags flags) {
  MOZ_CRASH("Call ICs not used in ion");
}
1964
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitLoadArgumentFixedSlot(ValOperandId resultId,
                                                   uint8_t slotIndex) {
  MOZ_CRASH("Call ICs not used in ion");
}
1969
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitLoadArgumentDynamicSlot(ValOperandId resultId,
                                                     Int32OperandId argcId,
                                                     uint8_t slotIndex) {
  MOZ_CRASH("Call ICs not used in ion");
}
1975
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitArrayJoinResult(ObjOperandId objId,
                                             StringOperandId sepId) {
  MOZ_CRASH("Call ICs not used in ion");
}
1980
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitPackedArraySliceResult(
    uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
    Int32OperandId endId) {
  MOZ_CRASH("Call ICs not used in ion");
}
1986
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
  MOZ_CRASH("Call ICs not used in ion");
}
1990
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitIsTypedArrayResult(ObjOperandId objId,
                                                bool isPossiblyWrapped) {
  MOZ_CRASH("Call ICs not used in ion");
}
1995
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitStringFromCharCodeResult(Int32OperandId codeId) {
  MOZ_CRASH("Call ICs not used in ion");
}
1999
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitStringFromCodePointResult(Int32OperandId codeId) {
  MOZ_CRASH("Call ICs not used in ion");
}
2003
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitMathRandomResult(uint32_t rngOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}
2007
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitReflectGetPrototypeOfResult(ObjOperandId objId) {
  MOZ_CRASH("Call ICs not used in ion");
}
2011
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitHasClassResult(ObjOperandId objId,
                                            uint32_t claspOffset) {
  MOZ_CRASH("Call ICs not used in ion");
}
2016
// Unreachable for Ion ICs: call ICs are not used in Ion.
bool IonCacheIRCompiler::emitSameValueResult(ValOperandId lhs,
                                             ValOperandId rhs) {
  MOZ_CRASH("Call ICs not used in ion");
}
2021
// Unreachable for Ion ICs: NewArray ICs are not used in Ion.
bool IonCacheIRCompiler::emitNewArrayObjectResult(uint32_t arrayLength,
                                                  uint32_t shapeOffset,
                                                  uint32_t siteOffset) {
  MOZ_CRASH("NewArray ICs not used in ion");
}
2027
// Unreachable for Ion ICs: NewObject ICs are not used in Ion.
bool IonCacheIRCompiler::emitNewPlainObjectResult(uint32_t numFixedSlots,
                                                  uint32_t numDynamicSlots,
                                                  gc::AllocKind allocKind,
                                                  uint32_t shapeOffset,
                                                  uint32_t siteOffset) {
  MOZ_CRASH("NewObject ICs not used in ion");
}
2035