1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/codegen/macro-assembler.h"
6 #include "src/codegen/register-configuration.h"
7 #include "src/codegen/safepoint-table.h"
8 #include "src/deoptimizer/deoptimizer.h"
9
10 namespace v8 {
11 namespace internal {
12
// Deopt exit sequences are not fixed-size on this platform, so the generic
// deoptimizer machinery must not assume per-kind exit sizes (both sizes are
// therefore left at 0 and unused).
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
const int Deoptimizer::kNonLazyDeoptExitSize = 0;
const int Deoptimizer::kLazyDeoptExitSize = 0;
16
17 #define __ masm->
18
19 // This code tries to be close to ia32 code so that any changes can be
20 // easily ported.
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
//
// Emits the common deoptimization entry stub. It:
//   1. captures the full machine state (all GPRs and the allocatable double
//      registers) on the stack,
//   2. calls Deoptimizer::New() to build a Deoptimizer object and copies the
//      captured state into its input FrameDescription,
//   3. calls Deoptimizer::ComputeOutputFrames(),
//   4. rebuilds the stack from the output FrameDescriptions, restores
//      registers from the last output frame, and jumps to its continuation.
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
                                                Isolate* isolate,
                                                DeoptimizeKind deopt_kind) {
  // The root array register cannot be relied upon inside this stub.
  NoRootArrayScope no_root_array(masm);

  // Save all the registers onto the stack
  const int kNumberOfRegisters = Register::kNumRegisters;

  // Registers re-loaded from the last output frame at the very end.
  RegList restored_regs = kJSCallerSaved | kCalleeSaved;

  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;

  // Save all double registers before messing with them.
  __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister dreg = DoubleRegister::from_code(code);
    // Slots are keyed by register code (not allocation index), so
    // non-allocatable codes simply leave gaps.
    int offset = code * kDoubleSize;
    __ StoreDouble(dreg, MemOperand(sp, offset));
  }

  // Push all GPRs onto the stack
  __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
  __ StoreMultipleP(r0, sp, MemOperand(sp));  // Save all 16 registers

  // Record fp as the C entry frame pointer so stack walkers can find the
  // boundary of this activation.
  __ mov(r1, Operand(ExternalReference::Create(
                 IsolateAddressId::kCEntryFPAddress, isolate)));
  __ StoreP(fp, MemOperand(r1));

  // Total size of the register save area just pushed (GPRs + doubles).
  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;

  // The bailout id is passed using r10
  __ LoadRR(r4, r10);

  // Cleanse the Return address for 31-bit
  __ CleanseP(r14);

  // Get the address of the location in the code object (r5)(return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register r6.
  __ LoadRR(r5, r14);

  // r6 = fp - (sp + saved area) = distance from fp down to the caller's sp
  // before the register save area was pushed.
  __ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
  __ SubP(r6, fp, r6);

  // Allocate a new deoptimizer object.
  // Pass six arguments in r2 to r7.
  __ PrepareCallCFunction(6, r7);
  // Argument 1 (r2): the JSFunction, or 0 if there is no function context
  // on the frame (frame-type marker found instead).
  __ LoadImmP(r2, Operand::Zero());
  Label context_check;
  __ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(r3, &context_check);
  __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ LoadImmP(r3, Operand(static_cast<int>(deopt_kind)));
  // r4: bailout id already loaded.
  // r5: code address or 0 already loaded.
  // r6: Fp-to-sp delta.
  // Parm6: isolate is passed on the stack.
  __ mov(r7, Operand(ExternalReference::isolate_address(isolate)));
  __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));

  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
  }

  // Preserve "deoptimizer" object in register r2 and get the input
  // frame descriptor pointer to r3 (deoptimizer->input_);
  __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  // A block copy would also work, e.g.:
  // __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
  //        MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
  // TODO(john.yan): optimize the following code by using mvc instruction
  DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset =
        (i * kSystemPointerSize) + FrameDescription::registers_offset();
    __ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
    __ StoreP(r4, MemOperand(r3, offset));
  }

  int double_regs_offset = FrameDescription::double_registers_offset();
  // Copy double registers to
  // double_registers_[DoubleRegister::kNumRegisters]
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    // Doubles sit above the 16 GPR slots in the save area.
    int src_offset =
        code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
    // TODO(joransiu): MVC opportunity
    __ LoadDouble(d0, MemOperand(sp, src_offset));
    __ StoreDouble(d0, MemOperand(r3, dst_offset));
  }

  // Mark the stack as not iterable for the CPU profiler which won't be able to
  // walk the stack without the return address.
  {
    UseScratchRegisterScope temps(masm);
    Register is_iterable = temps.Acquire();
    Register zero = r6;
    __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
    __ lhi(zero, Operand(0));
    __ StoreByte(zero, MemOperand(is_iterable));
  }

  // Remove the saved registers from the stack.
  __ la(sp, MemOperand(sp, kSavedRegistersAreaSize));

  // Compute a pointer to the unwinding limit in register r4; that is
  // the first stack slot not part of the input frame.
  __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
  __ AddP(r4, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ b(&pop_loop_header, Label::kNear);
  __ bind(&pop_loop);
  __ pop(r6);
  __ StoreP(r6, MemOperand(r5, 0));
  __ la(r5, MemOperand(r5, kSystemPointerSize));
  __ bind(&pop_loop_header);
  __ CmpP(r4, sp);
  __ bne(&pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(r2);  // Preserve deoptimizer object across call.
  // r2: deoptimizer object; r3: scratch.
  __ PrepareCallCFunction(1, r3);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
  }
  __ pop(r2);  // Restore deoptimizer object (class Deoptimizer).

  // Switch sp to the top of the frame that was on the stack when the
  // optimized code was entered; the output frames are built below it.
  __ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
  // Outer loop state: r6 = current "FrameDescription** output_",
  // r3 = one past the last FrameDescription**.
  __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
  __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset()));  // r6 is output_.
  __ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
  __ AddP(r3, r6, r3);
  __ b(&outer_loop_header, Label::kNear);

  __ bind(&outer_push_loop);
  // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
  __ LoadP(r4, MemOperand(r6, 0));  // output_[ix]
  __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
  __ b(&inner_loop_header, Label::kNear);

  // Push the frame contents from highest offset down, so they end up at
  // ascending addresses on the downward-growing stack.
  __ bind(&inner_push_loop);
  __ SubP(r5, Operand(sizeof(intptr_t)));
  __ AddP(r8, r4, r5);
  __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
  __ push(r8);

  __ bind(&inner_loop_header);
  __ CmpP(r5, Operand::Zero());
  __ bne(&inner_push_loop);  // test for gt?

  __ AddP(r6, r6, Operand(kSystemPointerSize));
  __ bind(&outer_loop_header);
  __ CmpP(r6, r3);
  __ blt(&outer_push_loop);

  // Restore double registers from the input frame description.
  __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister dreg = DoubleRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ ld(dreg, MemOperand(r3, src_offset));
  }

  // Push pc and continuation from the last output frame.
  // NOTE(review): r4 still holds the last FrameDescription* from the loop
  // above, which is relied upon here and below.
  __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
  __ push(r8);
  __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
  __ push(r8);

  // Restore the registers from the last output frame.
  __ LoadRR(r1, r4);
  // Skip index 0 (r0) and only restore registers in restored_regs.
  for (int i = kNumberOfRegisters - 1; i > 0; i--) {
    int offset =
        (i * kSystemPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs & (1 << i)) != 0) {
      __ LoadP(ToRegister(i), MemOperand(r1, offset));
    }
  }

  // Mark the stack iterable again now that it holds well-formed frames.
  {
    UseScratchRegisterScope temps(masm);
    Register is_iterable = temps.Acquire();
    Register one = r6;
    __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
    __ lhi(one, Operand(1));
    __ StoreByte(one, MemOperand(is_iterable));
  }

  __ pop(ip);  // get continuation, leave pc on stack
  __ pop(r14);
  __ Jump(ip);

  // Not reached: the continuation never returns here.
  __ stop();
}
239
GetFloatRegister(unsigned n) const240 Float32 RegisterValues::GetFloatRegister(unsigned n) const {
241 return Float32::FromBits(
242 static_cast<uint32_t>(double_registers_[n].get_bits() >> 32));
243 }
244
SetCallerPc(unsigned offset,intptr_t value)245 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
246 SetFrameSlot(offset, value);
247 }
248
SetCallerFp(unsigned offset,intptr_t value)249 void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
250 SetFrameSlot(offset, value);
251 }
252
// This platform does not use an out-of-line constant pool, so there is no
// caller constant-pool slot to fill in; reaching this is a bug.
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No out-of-line constant pool support.
  UNREACHABLE();
}
257
SetPc(intptr_t pc)258 void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
259
260 #undef __
261
262 } // namespace internal
263 } // namespace v8
264