/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"

// Implementation of StubAssembler

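// call_RT enters the C1 runtime: it records the last Java frame, calls the
// given entry point with the current thread as the first C argument, checks
// for a pending exception on return, and fetches any oop/metadata results
// from the thread-local vm_result fields. The returned code offset marks the
// call's return address so an oop map can be registered for it.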
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  int call_offset;
  if (!align_stack) {
    set_last_Java_frame(thread, noreg, rbp, NULL);
  } else {
    address the_pc = pc();
    call_offset = offset();
    set_last_Java_frame(thread, noreg, rbp, the_pc);
    andptr(rsp, -(StackAlignmentInBytes));    // Align stack
  }

  // do the call
  call(RuntimeAddress(entry));
  if (!align_stack) {
    call_offset = offset();
  }
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (metadata_result->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, thread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, thread);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
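  // Move arg1/arg2 into c_rarg1/c_rarg2 without losing either value: if the
  // two are exactly swapped, exchange them in place; otherwise copy in an
  // order that reads each source before it can be overwritten.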
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};

void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
  set_info(name, must_gc_arguments);
  enter();
}

void StubAssembler::epilogue() {
  leave();
  ret(0);
}

#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ prologue(name, must_gc_arguments);
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  __ load_parameter(offset_in_words, reg);
}


StubFrame::~StubFrame() {
  __ epilogue();
}

#undef __


// Implementation of Runtime1

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization)
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//
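// On 64-bit the stack slots are 32-bit VMReg slots, so every saved register
// occupies two of them: SLOT2 emits the name of the high-half slot and
// SLOT_PER_WORD is 2. On 32-bit a register is one slot and SLOT2 is empty.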
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                  // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
  extra_space_offset,                                                                       // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                   // 360, 364
  r14_off, r14H_off,                                                                        // 368, 372
  r13_off, r13H_off,                                                                        // 376, 380
  r12_off, r12H_off,                                                                        // 384, 388
  r11_off, r11H_off,                                                                        // 392, 396
  r10_off, r10H_off,                                                                        // 400, 404
  r9_off, r9H_off,                                                                          // 408, 412
  r8_off, r8H_off,                                                                          // 416, 420
  rdi_off, rdiH_off,                                                                        // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                  // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                  // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                  // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                  // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                  // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                  // 472, 476
  rax_off, SLOT2(raxH_off)                                                                  // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
  return_off, SLOT2(returnH_off)                                                            // 496, 500
  reg_save_frame_size   // As noted: neglects any parameters to runtime                     // 504
};

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed.
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  int xmm_bypass_limit = FrameMap::nof_xmm_regs;
#ifdef _LP64
  if (UseAVX < 3) {
    xmm_bypass_limit = xmm_bypass_limit / 2;
  }
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        if (n < xmm_bypass_limit) {
          VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
          // %%% This is really a waste but we'll keep things as they were for now
          if (true) {
            map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
          }
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}

#define __ this->

void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif
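  // restore_fpu re-checks this marker word; a mismatch means save and restore
  // disagree about the frame layout ("bad offsets in frame").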

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      int xmm_bypass_limit = FrameMap::nof_xmm_regs;
      int offset = 0;
#ifdef _LP64
      if (UseAVX < 3) {
        xmm_bypass_limit = xmm_bypass_limit / 2;
      }
#endif
      for (int n = 0; n < xmm_bypass_limit; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
        offset += 8;
      }
    } else if (UseSSE == 1) {
      // save XMM registers as float because double not supported without SSE2 (num MMX == num fpu)
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
        offset += 8;
      }
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");
}

#undef __
#define __ sasm->

static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      int xmm_bypass_limit = FrameMap::nof_xmm_regs;
#ifdef _LP64
      if (UseAVX < 3) {
        xmm_bypass_limit = xmm_bypass_limit / 2;
      }
#endif
      int offset = 0;
      for (int n = 0; n < xmm_bypass_limit; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    } else if (UseSSE == 1) {
      // restore XMM registers (num MMX == num fpu)
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}

#undef __
#define __ this->

void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) {
  __ block_comment("restore_live_registers");

  restore_fpu(this, restore_fpu_registers);
  __ popa();
}


void C1_MacroAssembler::restore_live_registers_except_rax(bool restore_fpu_registers) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(this, restore_fpu_registers);

#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}

#undef __
#define __ sasm->

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ save_live_registers_no_oop_map(save_fpu_registers);
  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ restore_live_registers(restore_fpu_registers);
}

static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  sasm->restore_live_registers_except_rax(restore_fpu_registers);
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// Target: the entry point of the method that creates and posts the exception oop.
// has_argument: true if the exception needs arguments (passed on the stack because
//               registers must be preserved).
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Preserve all registers.
  int num_rt_args = has_argument ? (2 + 1) : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // Now all registers are saved and can be used freely.
  // Verify that no old value is used accidentally.
  __ invalidate_registers(true, true, true, true, true, true);

  // Registers used by this stub.
  const Register temp_reg = rbx;

  // Load arguments for exception that are passed as arguments into the stub.
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
    __ movptr(c_rarg2, Address(rbp, 3*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 3*BytesPerWord));
    __ push(temp_reg);
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
    case forward_exception_id:
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm, 1 /*thread*/);

      // load and clear pending exception oop into RAX
      __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
      __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

      // load issuing PC (the return address for this stub) into rdx
      __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

      // make sure that the vm_results are cleared (may be unnecessary)
      __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
      __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
      break;
    case handle_exception_from_callee_id: {
      // At this point all registers except exception oop (RAX) and
      // exception pc (RDX) are dead.
      const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
      oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
      sasm->set_frame_size(frame_size);
      WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
      break;
    }
    default: ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if nmethod was deoptimized while we looked up
  //      handler regardless of whether handler existed in the nmethod.

  // only rax is valid at this time; all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // Restore the registers that were saved at the beginning.
      restore_live_registers(sasm, id != handle_exception_nofpu_id);
      break;
    case handle_exception_from_callee_id:
      // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
      // since we do a leave anyway.

      // Pop the return address.
      __ leave();
      __ pop(rcx);
      __ jmp(rcx);  // jump to exception handler
      break;
    default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

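    // The patching routine leaves a flag in rax: non-zero means the nmethod
    // was deoptimized while we were patching (see the reexecute check below).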
    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to the deopt blob. We can blow no
    // registers and must leave the throwing pc on the stack. A patch may
    // have values live in registers, so we use the entry point that
    // expects the exception in thread-local storage.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label cont;

  __ testptr(rax, rax);                                     // have we deoptimized?
  __ jcc(Assembler::equal, cont);                           // no

  // Will reexecute. The proper return address is already on the stack; just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && !UseTLAB
            && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
          NOT_LP64(__ get_thread(thread));

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(thread, obj, obj_size, 0, t1, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          // Using t2 for non 64-bit.
          const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread);
          NOT_LP64(__ get_thread(thread));
          __ eden_allocate(thread, obj, arr_size, 0, t1, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from the top of the stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C ABI
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);

        // fallthrough on success:
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C ABI

        f.load_argument(1, rax); // rax: object
        f.load_argument(0, rbx); // rbx: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not C ABI

        f.load_argument(0, rax); // rax: lock address

        // note: really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        const int num_rt_args = 2;  // thread, trap_request
        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
        f.load_argument(0, rax);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

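        // Pass the object to dtrace_object_alloc: on the stack for the 32-bit
        // calling convention, in c_rarg0 for 64-bit.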
        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi and rcx
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);
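        // exponent all ones with a zero mantissa is +/-infinity: fall through
        // and let the overflow handling below saturate to min/max_jlong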

        __ bind(do_convert);
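        // Set the x87 rounding-control bits (mask 0xc00) to round toward zero,
        // since Java's d2l/f2l conversions truncate while the default FPU mode
        // rounds to nearest; the original control word is restored afterwards.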
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit
        __ movptr(rax, result_low_word);
        // testing of high bits
        __ movl(rdx, result_high_word);
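        // On overflow or an invalid operand, fistp_d stores the "integer
        // indefinite" value 0x8000000000000000; the checks below detect that
        // bit pattern before deciding between min_jlong and max_jlong.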
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
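        // fcomp compared 0.0 against the original value: C3 (bit 14) and C0
        // (bit 8) both clear, i.e. (status & 0x4100) == 0, means 0.0 > value,
        // so a negative operand saturates to min_jlong, a positive one to
        // max_jlong.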
#ifdef _LP64
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
#else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
#endif // _LP64
        // return max_jlong
#ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
#else
        __ mov64(rax, CONST64(0x7fffffffffffffff));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return_min_jlong);
#ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
#else
        __ mov64(rax, UCONST64(0x8000000000000000));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
#ifndef _LP64
        __ xorptr(rdx, rdx);
        __ xorptr(rax, rax);
#else
        __ xorptr(rax, rax);
#endif // _LP64

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm, 1);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ movptr(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}