// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_X64
6
7 #include "src/codegen/macro-assembler.h"
8 #include "src/codegen/register-configuration.h"
9 #include "src/codegen/safepoint-table.h"
10 #include "src/deoptimizer/deoptimizer.h"
11 #include "src/objects/objects-inl.h"
12
13 namespace v8 {
14 namespace internal {
15
// Deopt exits on x64 are not emitted with a fixed size
// (kSupportsFixedDeoptExitSizes is false), so the per-kind exit-size
// constants are unused here and left as 0.
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
const int Deoptimizer::kNonLazyDeoptExitSize = 0;
const int Deoptimizer::kLazyDeoptExitSize = 0;
19
20 #define __ masm->
21
// Emits the deoptimization entry stub for |deopt_kind|.
//
// The generated code: (1) saves the complete machine state (GP and XMM
// registers) onto the stack, (2) calls into C++ to allocate a Deoptimizer
// and copy the optimized frame into its input FrameDescription, (3) calls
// C++ again to compute the unoptimized output frames, (4) materializes
// those frames on the stack, restores registers from the last output
// frame, and returns to the continuation.
//
// |masm| receives the generated instructions; |isolate| supplies the
// external references used for the C calls.
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
                                                Isolate* isolate,
                                                DeoptimizeKind deopt_kind) {
  NoRootArrayScope no_root_array(masm);

  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;

  const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
  __ AllocateStackSpace(kDoubleRegsSize);

  // Spill all allocatable XMM registers into the reserved stack area,
  // indexed by register code.
  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    XMMRegister xmm_reg = XMMRegister::from_code(code);
    int offset = code * kDoubleSize;
    __ Movsd(Operand(rsp, offset), xmm_reg);
  }

  // We push all registers onto the stack, even though we do not need
  // to restore all later.
  for (int i = 0; i < kNumberOfRegisters; i++) {
    Register r = Register::from_code(i);
    __ pushq(r);
  }

  const int kSavedRegistersAreaSize =
      kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;

  // Record rbp as the c_entry_fp so stack walkers can find this frame.
  __ Store(
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
      rbp);

  // We use this to keep the value of the fifth argument temporarily.
  // Unfortunately we can't store it directly in r8 (used for passing
  // this on linux), since it is another parameter passing register on windows.
  Register arg5 = r11;

  // The bailout id is passed using r13 on the stack.
  __ movq(arg_reg_3, r13);

  // Get the address of the location in the code object
  // and compute the fp-to-sp delta in register arg5.
  __ movq(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
  __ leaq(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));

  __ subq(arg5, rbp);
  __ negq(arg5);

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(6);
  // Pass nullptr as the function when the frame slot holds a frame-type
  // marker (Smi) instead of a context.
  __ movq(rax, Immediate(0));
  Label context_check;
  __ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(rdi, &context_check);
  __ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ movq(arg_reg_1, rax);
  __ Set(arg_reg_2, static_cast<int>(deopt_kind));
  // Args 3 and 4 are already in the right registers.

  // On windows put the arguments on the stack (PrepareCallCFunction
  // has created space for this). On linux pass the arguments in r8 and r9.
#ifdef V8_TARGET_OS_WIN
  __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
  __ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
  __ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
#else
  __ movq(r8, arg5);
  __ LoadAddress(r9, ExternalReference::isolate_address(isolate));
#endif

  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
  }
  // Preserve deoptimizer object in register rax and get the input
  // frame descriptor pointer.
  __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));

  // Fill in the input registers. Popping in reverse order of the pushes
  // above stores register i at registers_offset() + i * pointer size.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset =
        (i * kSystemPointerSize) + FrameDescription::registers_offset();
    __ PopQuad(Operand(rbx, offset));
  }

  // Fill in the double input registers from the area reserved at the top
  // of this function.
  int double_regs_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
    int dst_offset = i * kDoubleSize + double_regs_offset;
    __ popq(Operand(rbx, dst_offset));
  }

  // Mark the stack as not iterable for the CPU profiler which won't be able to
  // walk the stack without the return address.
  __ movb(__ ExternalReferenceAsOperand(
              ExternalReference::stack_is_iterable_address(isolate)),
          Immediate(0));

  // Remove the return address from the stack.
  __ addq(rsp, Immediate(kPCOnStackSize));

  // Compute a pointer to the unwinding limit in register rcx; that is
  // the first stack slot not part of the input frame.
  __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
  __ addq(rcx, rsp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
  Label pop_loop_header;
  __ jmp(&pop_loop_header);
  Label pop_loop;
  __ bind(&pop_loop);
  __ Pop(Operand(rdx, 0));
  __ addq(rdx, Immediate(sizeof(intptr_t)));
  __ bind(&pop_loop_header);
  __ cmpq(rcx, rsp);
  __ j(not_equal, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ pushq(rax);  // Preserve the deoptimizer object across the C call.
  __ PrepareCallCFunction(2);
  __ movq(arg_reg_1, rax);
  __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::compute_output_frames_function(), 2);
  }
  __ popq(rax);

  __ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
  // Outer loop state: rax = current FrameDescription**, rdx = one past the
  // last FrameDescription**.
  __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
  __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
  __ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0));
  __ jmp(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
  __ movq(rbx, Operand(rax, 0));
  __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
  __ jmp(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ subq(rcx, Immediate(sizeof(intptr_t)));
  __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
  __ bind(&inner_loop_header);
  __ testq(rcx, rcx);
  __ j(not_zero, &inner_push_loop);
  __ addq(rax, Immediate(kSystemPointerSize));
  __ bind(&outer_loop_header);
  __ cmpq(rax, rdx);
  __ j(below, &outer_push_loop);

  // Restore the allocatable XMM registers from the last output frame
  // description (rbx still points at it after the loops above).
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    XMMRegister xmm_reg = XMMRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ Movsd(xmm_reg, Operand(rbx, src_offset));
  }

  // Push pc and continuation from the last output frame.
  __ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
  __ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));

  // Push the registers from the last output frame.
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset =
        (i * kSystemPointerSize) + FrameDescription::registers_offset();
    __ PushQuad(Operand(rbx, offset));
  }

  // Restore the registers from the stack.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    Register r = Register::from_code(i);
    // Do not restore rsp, simply pop the value into the next register
    // and overwrite this afterwards.
    if (r == rsp) {
      DCHECK_GT(i, 0);
      r = Register::from_code(i - 1);
    }
    __ popq(r);
  }

  // The stack holds well-formed frames again, so re-enable iteration for
  // the CPU profiler.
  __ movb(__ ExternalReferenceAsOperand(
              ExternalReference::stack_is_iterable_address(isolate)),
          Immediate(1));

  // Return to the continuation point.
  __ ret(0);
}
218
GetFloatRegister(unsigned n) const219 Float32 RegisterValues::GetFloatRegister(unsigned n) const {
220 return Float32::FromBits(
221 static_cast<uint32_t>(double_registers_[n].get_bits()));
222 }
223
// Stores the caller's pc at |offset| in this frame; on x64 the return
// address lives in an ordinary frame slot.
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
227
// Stores the caller's frame pointer at |offset| in this frame; on x64 it
// is kept in an ordinary frame slot.
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
231
// x64 has no embedded constant pool, so a caller constant-pool slot can
// never be written on this architecture.
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No embedded constant pool support.
  UNREACHABLE();
}
236
// Stores |pc| in this frame description.
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
238
239 #undef __
240
241 } // namespace internal
242 } // namespace v8
243
244 #endif // V8_TARGET_ARCH_X64
245