1 /*
2 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "code/debugInfoRec.hpp"
29 #include "code/icBuffer.hpp"
30 #include "code/nativeInst.hpp"
31 #include "code/vtableStubs.hpp"
32 #include "gc/shared/gcLocker.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "logging/log.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "oops/compiledICHolder.hpp"
37 #include "runtime/safepointMechanism.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/vframeArray.hpp"
40 #include "runtime/vm_version.hpp"
41 #include "utilities/align.hpp"
42 #include "vmreg_x86.inline.hpp"
43 #ifdef COMPILER1
44 #include "c1/c1_Runtime1.hpp"
45 #endif
46 #ifdef COMPILER2
47 #include "opto/runtime.hpp"
48 #endif
49
50 #define __ masm->
51
52 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
53
54 class RegisterSaver {
55 // Capture info about frame layout
56 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
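  // For example, DEF_XMM_OFFS(0) expands to
  //   xmm0_off = xmm_off + 0*16/BytesPerInt, xmm0H_off
  // so with BytesPerInt == 4, consecutive XMM save slots land 4 enum values
  // (16 bytes) apart.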
57 enum layout {
58 fpu_state_off = 0,
59 fpu_state_end = fpu_state_off+FPUStateSizeInWords,
60 st0_off, st0H_off,
61 st1_off, st1H_off,
62 st2_off, st2H_off,
63 st3_off, st3H_off,
64 st4_off, st4H_off,
65 st5_off, st5H_off,
66 st6_off, st6H_off,
67 st7_off, st7H_off,
68 xmm_off,
69 DEF_XMM_OFFS(0),
70 DEF_XMM_OFFS(1),
71 DEF_XMM_OFFS(2),
72 DEF_XMM_OFFS(3),
73 DEF_XMM_OFFS(4),
74 DEF_XMM_OFFS(5),
75 DEF_XMM_OFFS(6),
76 DEF_XMM_OFFS(7),
77 flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
78 rdi_off,
79 rsi_off,
80     ignore_off,            // extra copy of rbp
81 rsp_off,
82 rbx_off,
83 rdx_off,
84 rcx_off,
85 rax_off,
86 // The frame sender code expects that rbp will be in the "natural" place and
87 // will override any oopMap setting for it. We must therefore force the layout
88 // so that it agrees with the frame sender code.
89 rbp_off,
90 return_off, // slot for return address
91 reg_save_size };
92 enum { FPU_regs_live = flags_off - fpu_state_end };
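  // A worked example for orientation, assuming FPUStateSizeInWords == 27 (as
  // asserted in save_live_registers below): fpu_state_end == 27, st0_off == 28,
  // xmm_off == 44, xmm7_off == 72, flags_off == 77, rdi_off == 78, rax_off == 85,
  // rbp_off == 86, return_off == 87 and reg_save_size == 88 words, i.e. a
  // 352-byte save area on a 32-bit VM.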
93
94 public:
95
96 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
97 int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
98 static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
99
100   static int rax_offset() { return rax_off; }
101   static int rbx_offset() { return rbx_off; }
102
103 // Offsets into the register save area
104 // Used by deoptimization when it is managing result register
105 // values on its own
106
107   static int raxOffset(void) { return rax_off; }
108   static int rdxOffset(void) { return rdx_off; }
109   static int rbxOffset(void) { return rbx_off; }
110   static int xmm0Offset(void) { return xmm0_off; }
111 // This really returns a slot in the fp save area, which one is not important
112   static int fpResultOffset(void) { return st0_off; }
113
114   // During deoptimization only the result registers need to be restored;
115   // all the other values have already been extracted.
116
117 static void restore_result_registers(MacroAssembler* masm);
118
119 };
120
121 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
122 int* total_frame_words, bool verify_fpu, bool save_vectors) {
123 int num_xmm_regs = XMMRegisterImpl::number_of_registers;
124 int ymm_bytes = num_xmm_regs * 16;
125 int zmm_bytes = num_xmm_regs * 32;
126 #ifdef COMPILER2
127 if (save_vectors) {
128 assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
129 assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
130 // Save upper half of YMM registers
131 int vect_bytes = ymm_bytes;
132 if (UseAVX > 2) {
133 // Save upper half of ZMM registers as well
134 vect_bytes += zmm_bytes;
135 }
136 additional_frame_words += vect_bytes / wordSize;
137 }
138 #else
139 assert(!save_vectors, "vectors are generated only by C2");
140 #endif
141 int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
142 int frame_words = frame_size_in_bytes / wordSize;
143 *total_frame_words = frame_words;
144
145 assert(FPUStateSizeInWords == 27, "update stack layout");
146
147 // save registers, fpu state, and flags
148   // We assume the caller already has a return address slot on the stack.
149   // We push rbp twice in this sequence because we want the real rbp
150   // to be under the return address like a normal enter, and we want to use pusha.
151   // We push by hand instead of using push.
152 __ enter();
153 __ pusha();
154 __ pushf();
155 __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
156 __ push_FPU_state(); // Save FPU state & init
157
158 if (verify_fpu) {
159     // Some stubs may have non-standard FPU control word settings, so
160     // only check and reset the value when it is required to be the
161     // standard value. The safepoint blob in particular can be used
162     // in methods which are using the 24-bit control word for
163     // optimized float math.
164
165 #ifdef ASSERT
166 // Make sure the control word has the expected value
167 Label ok;
168 __ cmpw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
169 __ jccb(Assembler::equal, ok);
170 __ stop("corrupted control word detected");
171 __ bind(ok);
172 #endif
173
174 // Reset the control word to guard against exceptions being unmasked
175 // since fstp_d can cause FPU stack underflow exceptions. Write it
176 // into the on stack copy and then reload that to make sure that the
177 // current and future values are correct.
178 __ movw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
179 }
180
181 __ frstor(Address(rsp, 0));
182 if (!verify_fpu) {
183 // Set the control word so that exceptions are masked for the
184 // following code.
185 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
186 }
187
188 int off = st0_off;
189 int delta = st1_off - off;
190
191 // Save the FPU registers in de-opt-able form
192 for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
193 __ fstp_d(Address(rsp, off*wordSize));
194 off += delta;
195 }
196
197 off = xmm0_off;
198 delta = xmm1_off - off;
199 if(UseSSE == 1) {
200 // Save the XMM state
201 for (int n = 0; n < num_xmm_regs; n++) {
202 __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
203 off += delta;
204 }
205 } else if(UseSSE >= 2) {
206 // Save whole 128bit (16 bytes) XMM registers
207 for (int n = 0; n < num_xmm_regs; n++) {
208 __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
209 off += delta;
210 }
211 }
212
213 if (save_vectors) {
214 __ subptr(rsp, ymm_bytes);
215 // Save upper half of YMM registers
216 for (int n = 0; n < num_xmm_regs; n++) {
217 __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
218 }
219 if (UseAVX > 2) {
220 __ subptr(rsp, zmm_bytes);
221 // Save upper half of ZMM registers
222 for (int n = 0; n < num_xmm_regs; n++) {
223 __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
224 }
225 }
226 }
227 __ vzeroupper();
228
229 // Set an oopmap for the call site. This oopmap will map all
230 // oop-registers and debug-info registers as callee-saved. This
231 // will allow deoptimization at this safepoint to find all possible
232 // debug-info recordings, as well as let GC find all oops.
233
234 OopMapSet *oop_maps = new OopMapSet();
235 OopMap* map = new OopMap( frame_words, 0 );
236
237 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
238 #define NEXTREG(x) (x)->as_VMReg()->next()
239
240 map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
241 map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
242 map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
243 map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
244   // rbp location is known implicitly, no oopMap
245 map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
246 map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
247 // %%% This is really a waste but we'll keep things as they were for now for the upper component
248 off = st0_off;
249 delta = st1_off - off;
250 for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
251 FloatRegister freg_name = as_FloatRegister(n);
252 map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
253 map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
254 off += delta;
255 }
256 off = xmm0_off;
257 delta = xmm1_off - off;
258 for (int n = 0; n < num_xmm_regs; n++) {
259 XMMRegister xmm_name = as_XMMRegister(n);
260 map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
261 map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
262 off += delta;
263 }
264 #undef NEXTREG
265 #undef STACK_OFFSET
266
267 return map;
268 }
269
270 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
271 int num_xmm_regs = XMMRegisterImpl::number_of_registers;
272 int ymm_bytes = num_xmm_regs * 16;
273 int zmm_bytes = num_xmm_regs * 32;
274 // Recover XMM & FPU state
275 int additional_frame_bytes = 0;
276 #ifdef COMPILER2
277 if (restore_vectors) {
278 assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
279 assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
280 // Save upper half of YMM registers
281 additional_frame_bytes = ymm_bytes;
282 if (UseAVX > 2) {
283 // Save upper half of ZMM registers as well
284 additional_frame_bytes += zmm_bytes;
285 }
286 }
287 #else
288 assert(!restore_vectors, "vectors are generated only by C2");
289 #endif
290
291 int off = xmm0_off;
292 int delta = xmm1_off - off;
293
294 __ vzeroupper();
295
296 if (UseSSE == 1) {
297 // Restore XMM registers
298 assert(additional_frame_bytes == 0, "");
299 for (int n = 0; n < num_xmm_regs; n++) {
300 __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
301 off += delta;
302 }
303 } else if (UseSSE >= 2) {
304 // Restore whole 128bit (16 bytes) XMM registers. Do this before restoring YMM and
305 // ZMM because the movdqu instruction zeros the upper part of the XMM register.
306 for (int n = 0; n < num_xmm_regs; n++) {
307 __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
308 off += delta;
309 }
310 }
311
312 if (restore_vectors) {
313 if (UseAVX > 2) {
314 // Restore upper half of ZMM registers.
315 for (int n = 0; n < num_xmm_regs; n++) {
316 __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
317 }
318 __ addptr(rsp, zmm_bytes);
319 }
320 // Restore upper half of YMM registers.
321 for (int n = 0; n < num_xmm_regs; n++) {
322 __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
323 }
324 __ addptr(rsp, ymm_bytes);
325 }
326
327 __ pop_FPU_state();
328 __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
329
330 __ popf();
331 __ popa();
332 // Get the rbp, described implicitly by the frame sender code (no oopMap)
333 __ pop(rbp);
334 }
335
336 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
337
338   // Just restore result registers. Only used by deoptimization. By
339   // now any callee-save register that needs to be restored to a c2
340   // caller of the deoptee has been extracted into the vframeArray
341   // and will be stuffed into the c2i adapter we create for later
342   // restoration, so only result registers need to be restored here.
343 //
344
345 __ frstor(Address(rsp, 0)); // Restore fpu state
346
347 // Recover XMM & FPU state
348 if( UseSSE == 1 ) {
349 __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
350 } else if( UseSSE >= 2 ) {
351 __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
352 }
353 __ movptr(rax, Address(rsp, rax_off*wordSize));
354 __ movptr(rdx, Address(rsp, rdx_off*wordSize));
355   // Pop all of the register save area off the stack except the return address
356 __ addptr(rsp, return_off * wordSize);
357 }
358
359 // Is the vector's size (in bytes) bigger than the size saved by default?
360 // 16-byte XMM registers are saved by default using SSE2 movdqu instructions.
361 // Note: MaxVectorSize == 0 with UseSSE < 2, and vectors are not generated.
362 bool SharedRuntime::is_wide_vector(int size) {
363 return size > 16;
364 }
365
366 size_t SharedRuntime::trampoline_size() {
367 return 16;
368 }
369
370 void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
371 __ jump(RuntimeAddress(destination));
372 }
373
374 // The java_calling_convention describes stack locations as ideal slots on
375 // a frame with no ABI restrictions. Since we must observe ABI restrictions
376 // (like the placement of the register window) the slots must be biased by
377 // the following value.
378 static int reg2offset_in(VMReg r) {
379 // Account for saved rbp, and return address
380 // This should really be in_preserve_stack_slots
381 return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
382 }
383
384 static int reg2offset_out(VMReg r) {
385 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
386 }
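// A quick illustration of the bias: with VMRegImpl::stack_slot_size == 4, an
// incoming argument in stack slot 3 lives at (3 + 2) * 4 == 20 bytes off rbp
// (skipping the saved rbp and the return address), while the same slot in the
// outgoing area is simply (3 + out_preserve_stack_slots()) * 4 bytes off rsp.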
387
388 // ---------------------------------------------------------------------------
389 // Read the array of BasicTypes from a signature, and compute where the
390 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
391 // quantities. Values less than VMRegImpl::stack0 are registers, those above
392 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer
393 // as framesizes are fixed.
394 // VMRegImpl::stack0 refers to the first slot 0(sp),
395 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
396 // 0 up to RegisterImpl::number_of_registers are the 32-bit
397 // integer registers.
398
399 // Pass first two oop/int args in registers ECX and EDX.
400 // Pass first two float/double args in registers XMM0 and XMM1.
401 // Doubles have precedence, so if you pass a mix of floats and doubles
402 // the doubles will grab the registers before the floats will.
403
404 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
405 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
406 // units regardless of build. Of course, for i486 there is no 64-bit build.
407
408
409 // ---------------------------------------------------------------------------
410 // The compiled Java calling convention.
411 // Pass first two oop/int args in registers ECX and EDX.
412 // Pass first two float/double args in registers XMM0 and XMM1.
413 // Doubles have precedence, so if you pass a mix of floats and doubles
414 // the doubles will grab the registers before the floats will.
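// A worked example (illustrative only, assuming UseSSE >= 2): for a signature
// (int, long, float, double, Object) the code below assigns
//   int    -> ECX             (first int/oop register)
//   long   -> stack slots 0-1 (longs always go to the stack, aligned)
//   double -> XMM0            (claimed during the first pass over doubles)
//   float  -> XMM1            (doubles have precedence, so the float gets XMM1)
//   Object -> EDX             (second int/oop register)
// and the convention returns 2 outgoing stack slots.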
415 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
416 VMRegPair *regs,
417 int total_args_passed,
418 int is_outgoing) {
419 uint stack = 0; // Starting stack position for args on stack
420
421
422 // Pass first two oop/int args in registers ECX and EDX.
423 uint reg_arg0 = 9999;
424 uint reg_arg1 = 9999;
425
426 // Pass first two float/double args in registers XMM0 and XMM1.
427 // Doubles have precedence, so if you pass a mix of floats and doubles
428 // the doubles will grab the registers before the floats will.
429 // CNC - TURNED OFF FOR non-SSE.
430 // On Intel we have to round all doubles (and most floats) at
431 // call sites by storing to the stack in any case.
432 // UseSSE=0 ==> Don't Use ==> 9999+0
433 // UseSSE=1 ==> Floats only ==> 9999+1
434 // UseSSE>=2 ==> Floats or doubles ==> 9999+2
435 enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
436 uint fargs = (UseSSE>=2) ? 2 : UseSSE;
437 uint freg_arg0 = 9999+fargs;
438 uint freg_arg1 = 9999+fargs;
439
440 // Pass doubles & longs aligned on the stack. First count stack slots for doubles
441 int i;
442 for( i = 0; i < total_args_passed; i++) {
443 if( sig_bt[i] == T_DOUBLE ) {
444 // first 2 doubles go in registers
445 if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
446 else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
447 else // Else double is passed low on the stack to be aligned.
448 stack += 2;
449 } else if( sig_bt[i] == T_LONG ) {
450 stack += 2;
451 }
452 }
453 int dstack = 0; // Separate counter for placing doubles
454
455 // Now pick where all else goes.
456 for( i = 0; i < total_args_passed; i++) {
457 // From the type and the argument number (count) compute the location
458 switch( sig_bt[i] ) {
459 case T_SHORT:
460 case T_CHAR:
461 case T_BYTE:
462 case T_BOOLEAN:
463 case T_INT:
464 case T_ARRAY:
465 case T_OBJECT:
466 case T_ADDRESS:
467 if( reg_arg0 == 9999 ) {
468 reg_arg0 = i;
469 regs[i].set1(rcx->as_VMReg());
470 } else if( reg_arg1 == 9999 ) {
471 reg_arg1 = i;
472 regs[i].set1(rdx->as_VMReg());
473 } else {
474 regs[i].set1(VMRegImpl::stack2reg(stack++));
475 }
476 break;
477 case T_FLOAT:
478 if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
479 freg_arg0 = i;
480 regs[i].set1(xmm0->as_VMReg());
481 } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
482 freg_arg1 = i;
483 regs[i].set1(xmm1->as_VMReg());
484 } else {
485 regs[i].set1(VMRegImpl::stack2reg(stack++));
486 }
487 break;
488 case T_LONG:
489 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
490 regs[i].set2(VMRegImpl::stack2reg(dstack));
491 dstack += 2;
492 break;
493 case T_DOUBLE:
494 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
495 if( freg_arg0 == (uint)i ) {
496 regs[i].set2(xmm0->as_VMReg());
497 } else if( freg_arg1 == (uint)i ) {
498 regs[i].set2(xmm1->as_VMReg());
499 } else {
500 regs[i].set2(VMRegImpl::stack2reg(dstack));
501 dstack += 2;
502 }
503 break;
504     case T_VOID: regs[i].set_bad(); break;
506 default:
507 ShouldNotReachHere();
508 break;
509 }
510 }
511
512   // The return value can be an odd number of VMRegImpl stack slots; make it a multiple of 2.
513 return align_up(stack, 2);
514 }
515
516 // Patch the caller's callsite with entry to compiled code if it exists.
517 static void patch_callers_callsite(MacroAssembler *masm) {
518 Label L;
519 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
520 __ jcc(Assembler::equal, L);
521   // Schedule the branch target address early.
522   // Call into the VM to patch the caller, then jump to the compiled callee.
523   // rax isn't live, so capture the return address while we easily can.
524 __ movptr(rax, Address(rsp, 0));
525 __ pusha();
526 __ pushf();
527
528 if (UseSSE == 1) {
529 __ subptr(rsp, 2*wordSize);
530 __ movflt(Address(rsp, 0), xmm0);
531 __ movflt(Address(rsp, wordSize), xmm1);
532 }
533 if (UseSSE >= 2) {
534 __ subptr(rsp, 4*wordSize);
535 __ movdbl(Address(rsp, 0), xmm0);
536 __ movdbl(Address(rsp, 2*wordSize), xmm1);
537 }
538 #ifdef COMPILER2
539 // C2 may leave the stack dirty if not in SSE2+ mode
540 if (UseSSE >= 2) {
541 __ verify_FPU(0, "c2i transition should have clean FPU stack");
542 } else {
543 __ empty_FPU_stack();
544 }
545 #endif /* COMPILER2 */
546
547 // VM needs caller's callsite
548 __ push(rax);
549 // VM needs target method
550 __ push(rbx);
551 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
552 __ addptr(rsp, 2*wordSize);
553
554 if (UseSSE == 1) {
555 __ movflt(xmm0, Address(rsp, 0));
556 __ movflt(xmm1, Address(rsp, wordSize));
557 __ addptr(rsp, 2*wordSize);
558 }
559 if (UseSSE >= 2) {
560 __ movdbl(xmm0, Address(rsp, 0));
561 __ movdbl(xmm1, Address(rsp, 2*wordSize));
562 __ addptr(rsp, 4*wordSize);
563 }
564
565 __ popf();
566 __ popa();
567 __ bind(L);
568 }
569
570
571 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
572 int next_off = st_off - Interpreter::stackElementSize;
573 __ movdbl(Address(rsp, next_off), r);
574 }
575
576 static void gen_c2i_adapter(MacroAssembler *masm,
577 int total_args_passed,
578 int comp_args_on_stack,
579 const BasicType *sig_bt,
580 const VMRegPair *regs,
581 Label& skip_fixup) {
582 // Before we get into the guts of the C2I adapter, see if we should be here
583 // at all. We've come from compiled code and are attempting to jump to the
584 // interpreter, which means the caller made a static call to get here
585 // (vcalls always get a compiled target if there is one). Check for a
586 // compiled target. If there is one, we need to patch the caller's call.
587 patch_callers_callsite(masm);
588
589 __ bind(skip_fixup);
590
591 #ifdef COMPILER2
592 // C2 may leave the stack dirty if not in SSE2+ mode
593 if (UseSSE >= 2) {
594 __ verify_FPU(0, "c2i transition should have clean FPU stack");
595 } else {
596 __ empty_FPU_stack();
597 }
598 #endif /* COMPILER2 */
599
600   // Since all args are passed on the stack, total_args_passed *
601   // Interpreter::stackElementSize is the space we need.
603 int extraspace = total_args_passed * Interpreter::stackElementSize;
604
605 // Get return address
606 __ pop(rax);
607
608 // set senderSP value
609 __ movptr(rsi, rsp);
610
611 __ subptr(rsp, extraspace);
612
613 // Now write the args into the outgoing interpreter space
614 for (int i = 0; i < total_args_passed; i++) {
615 if (sig_bt[i] == T_VOID) {
616 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
617 continue;
618 }
619
620 // st_off points to lowest address on stack.
621 int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
622 int next_off = st_off - Interpreter::stackElementSize;
623
624 // Say 4 args:
625 // i st_off
626 // 0 12 T_LONG
627 // 1 8 T_VOID
628 // 2 4 T_OBJECT
629 // 3 0 T_BOOL
630 VMReg r_1 = regs[i].first();
631 VMReg r_2 = regs[i].second();
632 if (!r_1->is_valid()) {
633 assert(!r_2->is_valid(), "");
634 continue;
635 }
636
637 if (r_1->is_stack()) {
638       // memory to memory; use rdi as a temp
639 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
640
641 if (!r_2->is_valid()) {
642 __ movl(rdi, Address(rsp, ld_off));
643 __ movptr(Address(rsp, st_off), rdi);
644 } else {
645
646 // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
647 // st_off == MSW, st_off-wordSize == LSW
648
649 __ movptr(rdi, Address(rsp, ld_off));
650 __ movptr(Address(rsp, next_off), rdi);
651 #ifndef _LP64
652 __ movptr(rdi, Address(rsp, ld_off + wordSize));
653 __ movptr(Address(rsp, st_off), rdi);
654 #else
655 #ifdef ASSERT
656 // Overwrite the unused slot with known junk
657 __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
658 __ movptr(Address(rsp, st_off), rax);
659 #endif /* ASSERT */
660 #endif // _LP64
661 }
662 } else if (r_1->is_Register()) {
663 Register r = r_1->as_Register();
664 if (!r_2->is_valid()) {
665 __ movl(Address(rsp, st_off), r);
666 } else {
667 // long/double in gpr
668 NOT_LP64(ShouldNotReachHere());
669 // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
670 // T_DOUBLE and T_LONG use two slots in the interpreter
671 if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
672 // long/double in gpr
673 #ifdef ASSERT
674 // Overwrite the unused slot with known junk
675 LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
676 __ movptr(Address(rsp, st_off), rax);
677 #endif /* ASSERT */
678 __ movptr(Address(rsp, next_off), r);
679 } else {
680 __ movptr(Address(rsp, st_off), r);
681 }
682 }
683 } else {
684 assert(r_1->is_XMMRegister(), "");
685 if (!r_2->is_valid()) {
686 __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
687 } else {
688 assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
689 move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
690 }
691 }
692 }
693
694 // Schedule the branch target address early.
695 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
696 // And repush original return address
697 __ push(rax);
698 __ jmp(rcx);
699 }
700
701
702 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
703 int next_val_off = ld_off - Interpreter::stackElementSize;
704 __ movdbl(r, Address(saved_sp, next_val_off));
705 }
706
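// Branches to L_ok when pc_reg lies strictly inside (code_start, code_end);
// otherwise control falls through (via L_fail), so the caller is expected to
// emit its failure handling immediately after this check.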
707 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
708 address code_start, address code_end,
709 Label& L_ok) {
710 Label L_fail;
711 __ lea(temp_reg, ExternalAddress(code_start));
712 __ cmpptr(pc_reg, temp_reg);
713 __ jcc(Assembler::belowEqual, L_fail);
714 __ lea(temp_reg, ExternalAddress(code_end));
715 __ cmpptr(pc_reg, temp_reg);
716 __ jcc(Assembler::below, L_ok);
717 __ bind(L_fail);
718 }
719
720 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
721 int total_args_passed,
722 int comp_args_on_stack,
723 const BasicType *sig_bt,
724 const VMRegPair *regs) {
725   // Note: rsi contains the senderSP on entry. We must preserve it since
726   // we may do an i2c -> c2i transition if we lose a race where compiled
727   // code goes non-entrant while we get args ready.
728
729 // Adapters can be frameless because they do not require the caller
730 // to perform additional cleanup work, such as correcting the stack pointer.
731 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
732 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
733 // even if a callee has modified the stack pointer.
734 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
735 // routinely repairs its caller's stack pointer (from sender_sp, which is set
736 // up via the senderSP register).
737 // In other words, if *either* the caller or callee is interpreted, we can
738 // get the stack pointer repaired after a call.
739 // This is why c2i and i2c adapters cannot be indefinitely composed.
740 // In particular, if a c2i adapter were to somehow call an i2c adapter,
741 // both caller and callee would be compiled methods, and neither would
742 // clean up the stack pointer changes performed by the two adapters.
743 // If this happens, control eventually transfers back to the compiled
744 // caller, but with an uncorrected stack, causing delayed havoc.
745
746 // Pick up the return address
747 __ movptr(rax, Address(rsp, 0));
748
749 if (VerifyAdapterCalls &&
750 (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
751 // So, let's test for cascading c2i/i2c adapters right now.
752 // assert(Interpreter::contains($return_addr) ||
753 // StubRoutines::contains($return_addr),
754 // "i2c adapter must return to an interpreter frame");
755 __ block_comment("verify_i2c { ");
756 Label L_ok;
757 if (Interpreter::code() != NULL)
758 range_check(masm, rax, rdi,
759 Interpreter::code()->code_start(), Interpreter::code()->code_end(),
760 L_ok);
761 if (StubRoutines::code1() != NULL)
762 range_check(masm, rax, rdi,
763 StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
764 L_ok);
765 if (StubRoutines::code2() != NULL)
766 range_check(masm, rax, rdi,
767 StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
768 L_ok);
769 const char* msg = "i2c adapter must return to an interpreter frame";
770 __ block_comment(msg);
771 __ stop(msg);
772 __ bind(L_ok);
773     __ block_comment("} verify_i2c ");
774 }
775
776 // Must preserve original SP for loading incoming arguments because
777 // we need to align the outgoing SP for compiled code.
778 __ movptr(rdi, rsp);
779
780 // Cut-out for having no stack args. Since up to 2 int/oop args are passed
781 // in registers, we will occasionally have no stack args.
782 int comp_words_on_stack = 0;
783 if (comp_args_on_stack) {
784 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
785 // registers are below. By subtracting stack0, we either get a negative
786 // number (all values in registers) or the maximum stack slot accessed.
787 // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
788 // Convert 4-byte stack slots to words.
789 comp_words_on_stack = align_up(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
790     // Round up to minimum stack alignment, in wordSize
791 comp_words_on_stack = align_up(comp_words_on_stack, 2);
792 __ subptr(rsp, comp_words_on_stack * wordSize);
793 }
794
795 // Align the outgoing SP
796 __ andptr(rsp, -(StackAlignmentInBytes));
797
798 // push the return address on the stack (note that pushing, rather
799 // than storing it, yields the correct frame alignment for the callee)
800 __ push(rax);
801
802 // Put saved SP in another register
803 const Register saved_sp = rax;
804 __ movptr(saved_sp, rdi);
805
806
807 // Will jump to the compiled code just as if compiled code was doing it.
808 // Pre-load the register-jump target early, to schedule it better.
809 __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
810
811 // Now generate the shuffle code. Pick up all register args and move the
812 // rest through the floating point stack top.
813 for (int i = 0; i < total_args_passed; i++) {
814 if (sig_bt[i] == T_VOID) {
815 // Longs and doubles are passed in native word order, but misaligned
816 // in the 32-bit build.
817 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
818 continue;
819 }
820
821 // Pick up 0, 1 or 2 words from SP+offset.
822
823 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
824 "scrambled load targets?");
825 // Load in argument order going down.
826 int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
827 // Point to interpreter value (vs. tag)
828 int next_off = ld_off - Interpreter::stackElementSize;
832 VMReg r_1 = regs[i].first();
833 VMReg r_2 = regs[i].second();
834 if (!r_1->is_valid()) {
835 assert(!r_2->is_valid(), "");
836 continue;
837 }
838 if (r_1->is_stack()) {
839 // Convert stack slot to an SP offset (+ wordSize to account for return address )
840 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
841
842       // We can use rsi as a temp here because compiled code doesn't need rsi as an input,
843       // and if we end up going through a c2i because of a miss, a reasonable value of rsi
844       // will be generated.
845 if (!r_2->is_valid()) {
846 // __ fld_s(Address(saved_sp, ld_off));
847 // __ fstp_s(Address(rsp, st_off));
848 __ movl(rsi, Address(saved_sp, ld_off));
849 __ movptr(Address(rsp, st_off), rsi);
850 } else {
851         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
852         // are accessed with negative offsets, so the LSW is at the LOW address.
853
854 // ld_off is MSW so get LSW
855 // st_off is LSW (i.e. reg.first())
856 // __ fld_d(Address(saved_sp, next_off));
857 // __ fstp_d(Address(rsp, st_off));
858 //
859         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
860         // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
861         // so we must adjust where to pick up the data to match the interpreter.
862 //
863         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
864         // are accessed with negative offsets, so the LSW is at the LOW address.
865
866 // ld_off is MSW so get LSW
867 const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
868 next_off : ld_off;
869 __ movptr(rsi, Address(saved_sp, offset));
870 __ movptr(Address(rsp, st_off), rsi);
871 #ifndef _LP64
872 __ movptr(rsi, Address(saved_sp, ld_off));
873 __ movptr(Address(rsp, st_off + wordSize), rsi);
874 #endif // _LP64
875 }
876 } else if (r_1->is_Register()) { // Register argument
877 Register r = r_1->as_Register();
878 assert(r != rax, "must be different");
879 if (r_2->is_valid()) {
880 //
881         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
882         // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
883         // so we must adjust where to pick up the data to match the interpreter.
884
885 const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
886 next_off : ld_off;
887
888 // this can be a misaligned move
889 __ movptr(r, Address(saved_sp, offset));
890 #ifndef _LP64
891 assert(r_2->as_Register() != rax, "need another temporary register");
892 // Remember r_1 is low address (and LSB on x86)
893 // So r_2 gets loaded from high address regardless of the platform
894 __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
895 #endif // _LP64
896 } else {
897 __ movl(r, Address(saved_sp, ld_off));
898 }
899 } else {
900 assert(r_1->is_XMMRegister(), "");
901 if (!r_2->is_valid()) {
902 __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
903 } else {
904 move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
905 }
906 }
907 }
908
909   // 6243940 We might end up in handle_wrong_method if
910   // the callee is deoptimized as we race through here. If that
911   // happens we don't want to take a safepoint because the
912   // caller frame will look interpreted and arguments are now
913   // "compiled" so it is much better to make this transition
914   // invisible to the stack walking code. Unfortunately, if
915   // we try to find the callee by normal means a safepoint
916   // is possible. So we stash the desired callee in the thread
917   // and the VM will find it there should this case occur.
918
919 __ get_thread(rax);
920 __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
921
922   // Move Method* to rax in case we end up in a c2i adapter.
923   // The c2i adapters expect Method* in rax (c2) because c2's
924   // resolve stubs return the result (the method) in rax.
925   // I'd love to fix this.
926 __ mov(rax, rbx);
927
928 __ jmp(rdi);
929 }
930
931 // ---------------------------------------------------------------
932 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
933 int total_args_passed,
934 int comp_args_on_stack,
935 const BasicType *sig_bt,
936 const VMRegPair *regs,
937 AdapterFingerPrint* fingerprint) {
938 address i2c_entry = __ pc();
939
940 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
941
942 // -------------------------------------------------------------------------
943 // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
944 // to the interpreter. The args start out packed in the compiled layout. They
945 // need to be unpacked into the interpreter layout. This will almost always
946 // require some stack space. We grow the current (compiled) stack, then repack
947 // the args. We finally end in a jump to the generic interpreter entry point.
948 // On exit from the interpreter, the interpreter will restore our SP (lest the
949 // compiled code, which relies solely on SP and not EBP, get sick).
950
951 address c2i_unverified_entry = __ pc();
952 Label skip_fixup;
953
954 Register holder = rax;
955 Register receiver = rcx;
956 Register temp = rbx;
957
958 {
959
960 Label missed;
961 __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
962 __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
963 __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
964 __ jcc(Assembler::notEqual, missed);
965     // Method might have been compiled since the call site was patched to
966     // interpreted; if that is the case, treat it as a miss so we can get
967     // the call site corrected.
968 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
969 __ jcc(Assembler::equal, skip_fixup);
970
971 __ bind(missed);
972 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
973 }
974
975 address c2i_entry = __ pc();
976
977 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
978
979 __ flush();
980 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
981 }
982
983 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
984 VMRegPair *regs,
985 VMRegPair *regs2,
986 int total_args_passed) {
987 assert(regs2 == NULL, "not needed on x86");
988 // We return the amount of VMRegImpl stack slots we need to reserve for all
989 // the arguments NOT counting out_preserve_stack_slots.
990
991 uint stack = 0; // All arguments on stack
992
993 for( int i = 0; i < total_args_passed; i++) {
994 // From the type and the argument number (count) compute the location
995 switch( sig_bt[i] ) {
996 case T_BOOLEAN:
997 case T_CHAR:
998 case T_FLOAT:
999 case T_BYTE:
1000 case T_SHORT:
1001 case T_INT:
1002 case T_OBJECT:
1003 case T_ARRAY:
1004 case T_ADDRESS:
1005 case T_METADATA:
1006 regs[i].set1(VMRegImpl::stack2reg(stack++));
1007 break;
1008 case T_LONG:
1009 case T_DOUBLE: // The stack numbering is reversed from Java
1010 // Since C arguments do not get reversed, the ordering for
1011 // doubles on the stack must be opposite the Java convention
1012 assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
1013 regs[i].set2(VMRegImpl::stack2reg(stack));
1014 stack += 2;
1015 break;
1016 case T_VOID: regs[i].set_bad(); break;
1017 default:
1018 ShouldNotReachHere();
1019 break;
1020 }
1021 }
1022 return stack;
1023 }
1024
1025 // A simple move of integer like type
1026 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1027 if (src.first()->is_stack()) {
1028 if (dst.first()->is_stack()) {
1029 // stack to stack
1030 // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1031 // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1032 __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
1033 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1034 } else {
1035 // stack to reg
1036 __ movl2ptr(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1037 }
1038 } else if (dst.first()->is_stack()) {
1039 // reg to stack
1040 // no need to sign extend on 64bit
1041 __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1042 } else {
1043 if (dst.first() != src.first()) {
1044 __ mov(dst.first()->as_Register(), src.first()->as_Register());
1045 }
1046 }
1047 }
1048
1049 // An oop arg. Must pass a handle, not the oop itself.
1050 static void object_move(MacroAssembler* masm,
1051 OopMap* map,
1052 int oop_handle_offset,
1053 int framesize_in_slots,
1054 VMRegPair src,
1055 VMRegPair dst,
1056 bool is_receiver,
1057 int* receiver_offset) {
1058
1059 // Because of the calling conventions we know that src can be a
1060 // register or a stack location. dst can only be a stack location.
1061
1062 assert(dst.first()->is_stack(), "must be stack");
1063 // must pass a handle. First figure out the location we use as a handle
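  // Sketch of the handle protocol assumed here (JNI-style): a non-null oop is
  // passed as the address of the stack slot holding it, while a null oop is
  // passed as a NULL pointer; the xorptr/cmpptr/lea sequences below compute
  // exactly that into rHandle.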
1064
1065 if (src.first()->is_stack()) {
1066 // Oop is already on the stack as an argument
1067 Register rHandle = rax;
1068 Label nil;
1069 __ xorptr(rHandle, rHandle);
1070 __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
1071 __ jcc(Assembler::equal, nil);
1072 __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1073 __ bind(nil);
1074 __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1075
1076 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1077 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1078 if (is_receiver) {
1079 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1080 }
1081 } else {
1082     // Oop is in a register; we must store it to the space we reserve
1083     // on the stack for oop_handles
1084 const Register rOop = src.first()->as_Register();
1085 const Register rHandle = rax;
1086 int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
1087 int offset = oop_slot*VMRegImpl::stack_slot_size;
1088 Label skip;
1089 __ movptr(Address(rsp, offset), rOop);
1090 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1091 __ xorptr(rHandle, rHandle);
1092 __ cmpptr(rOop, (int32_t)NULL_WORD);
1093 __ jcc(Assembler::equal, skip);
1094 __ lea(rHandle, Address(rsp, offset));
1095 __ bind(skip);
1096 // Store the handle parameter
1097 __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1098 if (is_receiver) {
1099 *receiver_offset = offset;
1100 }
1101 }
1102 }
1103
1104 // A float arg may have to do float reg to int reg conversion
1105 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1106 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1107
1108 // Because of the calling convention we know that src is either a stack location
1109 // or an xmm register. dst can only be a stack location.
1110
1111 assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");
1112
1113 if (src.first()->is_stack()) {
1114 __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1115 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1116 } else {
1117 // reg to stack
1118 __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1119 }
1120 }
1121
1122 // A long move
1123 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1124
1125   // The only legal possibility for a long_move VMRegPair is:
1126   // 1: two stack slots (possibly unaligned)
1127   // as neither the Java nor the C calling convention will use registers
1128   // for longs.
1129
1130 if (src.first()->is_stack() && dst.first()->is_stack()) {
1131 assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
1132 __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1133 NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
1134 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1135 NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
1136 } else {
1137 ShouldNotReachHere();
1138 }
1139 }
1140
1141 // A double move
1142 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1143
1144   // The only legal possibilities for a double_move VMRegPair are
1145   // listed below. The painful thing here is that, like long_move,
1146   // a VMRegPair might be two unaligned stack slots.
1147 // Because of the calling convention we know that src is either
1148 // 1: a single physical register (xmm registers only)
1149 // 2: two stack slots (possibly unaligned)
1150 // dst can only be a pair of stack slots.
1151
1152 assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");
1153
1154 if (src.first()->is_stack()) {
1155 // source is all stack
1156 __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
1157 NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
1158 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1159 NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
1160 } else {
1161 // reg to stack
1162 // No worries about stack alignment
1163 __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1164 }
1165 }
1166
1167
1168 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1169   // We always ignore the frame_slots arg and just use the space just below the
1170   // frame pointer, which by this time is free to use.
1171 switch (ret_type) {
1172 case T_FLOAT:
1173 __ fstp_s(Address(rbp, -wordSize));
1174 break;
1175 case T_DOUBLE:
1176 __ fstp_d(Address(rbp, -2*wordSize));
1177 break;
1178 case T_VOID: break;
1179 case T_LONG:
1180 __ movptr(Address(rbp, -wordSize), rax);
1181 NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
1182 break;
1183 default: {
1184 __ movptr(Address(rbp, -wordSize), rax);
1185 }
1186 }
1187 }
1188
1189 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1190   // We always ignore the frame_slots arg and just use the space just below the
1191   // frame pointer, which by this time is free to use.
1192 switch (ret_type) {
1193 case T_FLOAT:
1194 __ fld_s(Address(rbp, -wordSize));
1195 break;
1196 case T_DOUBLE:
1197 __ fld_d(Address(rbp, -2*wordSize));
1198 break;
1199 case T_LONG:
1200 __ movptr(rax, Address(rbp, -wordSize));
1201 NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
1202 break;
1203 case T_VOID: break;
1204 default: {
1205 __ movptr(rax, Address(rbp, -wordSize));
1206 }
1207 }
1208 }
1209
1210
1211 static void save_or_restore_arguments(MacroAssembler* masm,
1212 const int stack_slots,
1213 const int total_in_args,
1214 const int arg_save_area,
1215 OopMap* map,
1216 VMRegPair* in_regs,
1217 BasicType* in_sig_bt) {
1218 // if map is non-NULL then the code should store the values,
1219 // otherwise it should load them.
1220 int handle_index = 0;
1221   // Save down double-word values first
1222 for ( int i = 0; i < total_in_args; i++) {
1223 if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1224 int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1225 int offset = slot * VMRegImpl::stack_slot_size;
1226 handle_index += 2;
1227 assert(handle_index <= stack_slots, "overflow");
1228 if (map != NULL) {
1229 __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1230 } else {
1231 __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1232 }
1233 }
1234 if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
1235 int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1236 int offset = slot * VMRegImpl::stack_slot_size;
1237 handle_index += 2;
1238 assert(handle_index <= stack_slots, "overflow");
1239 if (map != NULL) {
1240 __ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
1241 if (in_regs[i].second()->is_Register()) {
1242 __ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
1243 }
1244 } else {
1245 __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
1246 if (in_regs[i].second()->is_Register()) {
1247 __ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
1248 }
1249 }
1250 }
1251 }
1252 // Save or restore single word registers
1253 for ( int i = 0; i < total_in_args; i++) {
1254 if (in_regs[i].first()->is_Register()) {
1255 int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1256 int offset = slot * VMRegImpl::stack_slot_size;
1257 assert(handle_index <= stack_slots, "overflow");
1258 if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1259         map->set_oop(VMRegImpl::stack2reg(slot));
1260 }
1261
1262       // Value is in an input register; we must flush it to the stack.
1263 const Register reg = in_regs[i].first()->as_Register();
1264 switch (in_sig_bt[i]) {
1265 case T_ARRAY:
1266 if (map != NULL) {
1267 __ movptr(Address(rsp, offset), reg);
1268 } else {
1269 __ movptr(reg, Address(rsp, offset));
1270 }
1271 break;
1272 case T_BOOLEAN:
1273 case T_CHAR:
1274 case T_BYTE:
1275 case T_SHORT:
1276 case T_INT:
1277 if (map != NULL) {
1278 __ movl(Address(rsp, offset), reg);
1279 } else {
1280 __ movl(reg, Address(rsp, offset));
1281 }
1282 break;
1283 case T_OBJECT:
1284 default: ShouldNotReachHere();
1285 }
1286 } else if (in_regs[i].first()->is_XMMRegister()) {
1287 if (in_sig_bt[i] == T_FLOAT) {
1288 int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1289 int offset = slot * VMRegImpl::stack_slot_size;
1290 assert(handle_index <= stack_slots, "overflow");
1291 if (map != NULL) {
1292 __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1293 } else {
1294 __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1295 }
1296 }
1297 } else if (in_regs[i].first()->is_stack()) {
1298 if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1299 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1300 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1301 }
1302 }
1303 }
1304 }
1305
1306 // Check GCLocker::needs_gc and enter the runtime if it's true. This
1307 // keeps a new JNI critical region from starting until a GC has been
1308 // forced. Save down any oops in registers and describe them in an
1309 // OopMap.
1310 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1311 Register thread,
1312 int stack_slots,
1313 int total_c_args,
1314 int total_in_args,
1315 int arg_save_area,
1316 OopMapSet* oop_maps,
1317 VMRegPair* in_regs,
1318 BasicType* in_sig_bt) {
1319 __ block_comment("check GCLocker::needs_gc");
1320 Label cont;
1321 __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1322 __ jcc(Assembler::equal, cont);
1323
1324 // Save down any incoming oops and call into the runtime to halt for a GC
1325
1326 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1327
1328 save_or_restore_arguments(masm, stack_slots, total_in_args,
1329 arg_save_area, map, in_regs, in_sig_bt);
1330
1331 address the_pc = __ pc();
1332 oop_maps->add_gc_map( __ offset(), map);
1333 __ set_last_Java_frame(thread, rsp, noreg, the_pc);
1334
1335 __ block_comment("block_for_jni_critical");
1336 __ push(thread);
1337 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1338 __ increment(rsp, wordSize);
1339
1340 __ get_thread(thread);
1341 __ reset_last_Java_frame(thread, false);
1342
1343 save_or_restore_arguments(masm, stack_slots, total_in_args,
1344 arg_save_area, NULL, in_regs, in_sig_bt);
1345
1346 __ bind(cont);
1347 #ifdef ASSERT
1348 if (StressCriticalJNINatives) {
1349 // Stress register saving
1350 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1351 save_or_restore_arguments(masm, stack_slots, total_in_args,
1352 arg_save_area, map, in_regs, in_sig_bt);
1353 // Destroy argument registers
1354 for (int i = 0; i < total_in_args - 1; i++) {
1355 if (in_regs[i].first()->is_Register()) {
1356 const Register reg = in_regs[i].first()->as_Register();
1357 __ xorptr(reg, reg);
1358 } else if (in_regs[i].first()->is_XMMRegister()) {
1359 __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1360 } else if (in_regs[i].first()->is_FloatRegister()) {
1361 ShouldNotReachHere();
1362 } else if (in_regs[i].first()->is_stack()) {
1363 // Nothing to do
1364 } else {
1365 ShouldNotReachHere();
1366 }
1367 if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1368 i++;
1369 }
1370 }
1371
1372 save_or_restore_arguments(masm, stack_slots, total_in_args,
1373 arg_save_area, NULL, in_regs, in_sig_bt);
1374 }
1375 #endif
1376 }
1377
1378 // Unpack an array argument into a pointer to the body and the length
1379 // if the array is non-null, otherwise pass 0 for both.
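// For example (an illustrative sketch): a Java int[] argument arrives as an oop
// and leaves as a raw jint* body pointer plus a jint length, while a null int[]
// is passed as 0 for both, so the critical native never sees an oop.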
1380 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1381 Register tmp_reg = rax;
1382 assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1383 "possible collision");
1384 assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1385 "possible collision");
1386
1387 // Pass the length, ptr pair
1388 Label is_null, done;
1389 VMRegPair tmp(tmp_reg->as_VMReg());
1390 if (reg.first()->is_stack()) {
1391 // Load the arg up from the stack
1392 simple_move32(masm, reg, tmp);
1393 reg = tmp;
1394 }
1395 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1396 __ jccb(Assembler::equal, is_null);
1397 __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1398 simple_move32(masm, tmp, body_arg);
1399 // load the length relative to the body.
1400 __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1401 arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1402 simple_move32(masm, tmp, length_arg);
1403 __ jmpb(done);
1404 __ bind(is_null);
1405 // Pass zeros
1406 __ xorptr(tmp_reg, tmp_reg);
1407 simple_move32(masm, tmp, body_arg);
1408 simple_move32(masm, tmp, length_arg);
1409 __ bind(done);
1410 }
1411
1412 static void verify_oop_args(MacroAssembler* masm,
1413 const methodHandle& method,
1414 const BasicType* sig_bt,
1415 const VMRegPair* regs) {
1416 Register temp_reg = rbx; // not part of any compiled calling seq
1417 if (VerifyOops) {
1418 for (int i = 0; i < method->size_of_parameters(); i++) {
1419 if (sig_bt[i] == T_OBJECT ||
1420 sig_bt[i] == T_ARRAY) {
1421 VMReg r = regs[i].first();
1422 assert(r->is_valid(), "bad oop arg");
1423 if (r->is_stack()) {
1424 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1425 __ verify_oop(temp_reg);
1426 } else {
1427 __ verify_oop(r->as_Register());
1428 }
1429 }
1430 }
1431 }
1432 }
1433
1434 static void gen_special_dispatch(MacroAssembler* masm,
1435 const methodHandle& method,
1436 const BasicType* sig_bt,
1437 const VMRegPair* regs) {
1438 verify_oop_args(masm, method, sig_bt, regs);
1439 vmIntrinsics::ID iid = method->intrinsic_id();
1440
1441 // Now write the args into the outgoing interpreter space
1442 bool has_receiver = false;
1443 Register receiver_reg = noreg;
1444 int member_arg_pos = -1;
1445 Register member_reg = noreg;
1446 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1447 if (ref_kind != 0) {
1448 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1449 member_reg = rbx; // known to be free at this point
1450 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1451 } else if (iid == vmIntrinsics::_invokeBasic) {
1452 has_receiver = true;
1453 } else {
1454 fatal("unexpected intrinsic id %d", iid);
1455 }
1456
1457 if (member_reg != noreg) {
1458 // Load the member_arg into register, if necessary.
1459 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1460 VMReg r = regs[member_arg_pos].first();
1461 if (r->is_stack()) {
1462 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1463 } else {
1464 // no data motion is needed
1465 member_reg = r->as_Register();
1466 }
1467 }
1468
1469 if (has_receiver) {
1470 // Make sure the receiver is loaded into a register.
1471 assert(method->size_of_parameters() > 0, "oob");
1472 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1473 VMReg r = regs[0].first();
1474 assert(r->is_valid(), "bad receiver arg");
1475 if (r->is_stack()) {
1476 // Porting note: This assumes that compiled calling conventions always
1477 // pass the receiver oop in a register. If this is not true on some
1478 // platform, pick a temp and load the receiver from stack.
1479 fatal("receiver always in a register");
1480 receiver_reg = rcx; // known to be free at this point
1481 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1482 } else {
1483 // no data motion is needed
1484 receiver_reg = r->as_Register();
1485 }
1486 }
1487
1488 // Figure out which address we are really jumping to:
1489 MethodHandles::generate_method_handle_dispatch(masm, iid,
1490 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1491 }

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GCLocker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//   if (GCLocker::needs_gc())
//     SharedRuntime::block_for_jni_critical();
//   transition to thread_in_native
//   unpack array arguments and call native entry point
//   check for safepoint in progress
//   check if any thread suspend flags are set
//   call into JVM and possibly unlock the JNI critical
//   if a GC was suppressed while in the critical native.
//   transition back to thread_in_Java
//   return to caller
//
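// For comparison (standard JNI as a programmer would write it by hand, not
// code generated here), the non-critical equivalent of the unpacking above
// is roughly:
//   jint* body = (jint*)(*env)->GetPrimitiveArrayCritical(env, array, NULL);
//   ... use body ...
//   (*env)->ReleasePrimitiveArrayCritical(env, array, body, 0);
// The critical-native wrapper does the GCLocker bookkeeping itself and hands
// the callee the raw (length, body) pair directly.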
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type,
                                                address critical_entry) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = critical_entry;
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet* oop_maps = new OopMapSet();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args++;
      }
    }
  }
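  // Worked example (hypothetical method, for illustration only): for
  //   static native void m(int[] a, long b)
  // the java signature is { T_ARRAY, T_LONG, T_VOID }, so total_in_args = 3.
  // Non-critical: JNIEnv* + jclass + 3 args  -> total_c_args = 5.
  // Critical:     the T_ARRAY adds one slot  -> total_c_args = 4.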

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args; i++) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        Symbol* atype = ss.as_symbol();
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[i] = T_BYTE;    break;
            case 'C': in_elem_bt[i] = T_CHAR;    break;
            case 'D': in_elem_bt[i] = T_DOUBLE;  break;
            case 'F': in_elem_bt[i] = T_FLOAT;   break;
            case 'I': in_elem_bt[i] = T_INT;     break;
            case 'J': in_elem_bt[i] = T_LONG;    break;
            case 'S': in_elem_bt[i] = T_SHORT;   break;
            case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
  }
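  // Continuing the hypothetical example above, the critical-native signature
  // would come out as out_sig_bt = { T_INT, T_ADDRESS, T_LONG, T_VOID } with
  // in_elem_bt = { T_INT, T_VOID, T_VOID } (length precedes body, matching
  // the unpack_array_argument call below).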

  // Now figure out where the args must be stored and how much stack space
  // they require.
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // registers, a max of 2 on x86.

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area
  int total_save_slots = 2 * VMRegImpl::slots_per_word;  // 2 arguments passed in registers
  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_ARRAY:  // critical array (uses 2 slots on LP64)
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  single_slots++; break;
          case T_LONG: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_XMMRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
    // align the save area
    if (double_slots != 0) {
      stack_slots = align_up(stack_slots, 2);
    }
  }
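  // For illustration: an int argument in a register and a float argument in
  // an XMM register each count one single slot; each long or double counts
  // one double slot, so e.g. single_slots = 2 and double_slots = 1 yields
  // total_save_slots = 4 (and stack_slots re-aligned to an even slot count).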

  int oop_handle_offset = stack_slots;
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place (+2) to save return values or temp during shuffling
  // + 2 for return address (which we own) and saved rbp,
  stack_slots += 4;

  // OK, the space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset (-lock_slot_rbp_offset)
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset (a max of 2 registers)
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //
  // ****************************************************************************
  // WARNING - on Windows Java Natives use the pascal calling convention and pop
  // the arguments off of the stack after the jni call. Before the call we can
  // use instructions that are SP relative. After the jni call we switch to FP
  // relative instructions instead of re-adjusting the stack on windows.
  // ****************************************************************************


  // Now compute the actual number of stack words we need, rounding to make
  // the stack properly aligned.
  stack_slots = align_up(stack_slots, StackAlignmentInSlots);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
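  // For example, assuming 16-byte stack alignment (StackAlignmentInSlots = 4
  // with 4-byte slots), stack_slots = 15 would be rounded up to 16, giving a
  // 64-byte frame.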

  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except rbp. rbp is the only callee save register
  // as far as the interpreter and the compiler(s) are concerned.


  const Register ic_reg = rax;
  const Register receiver = rcx;
  Label hit;
  Label exception_pending;

  __ verify_oop(receiver);
  __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
  __ jcc(Assembler::equal, hit);

  __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // verified entry must be aligned for code patching.
  // and the first 5 bytes must be in the same cache line
  // if we align at 8 then we will be sure 5 bytes are in the same line
  __ align(8);
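  // (Cache line sizes are multiples of 8, so a 5-byte instruction starting
  // at an 8-byte-aligned address cannot straddle a line boundary.)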

  __ bind(hit);

  int vep_offset = ((intptr_t)__ pc()) - start;

#ifdef COMPILER1
  // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
  if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
    inline_check_hashcode_from_object_header(masm, method, rcx /*obj_reg*/, rax /*result*/);
  }
#endif  // COMPILER1

  // The instruction at the verified entry point must be 5 bytes or longer
  // because it can be patched on the fly by make_non_entrant. The stack bang
  // instruction fits that requirement.

  // Generate stack overflow check

  if (UseStackBanging) {
    __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
  } else {
    // need a 5 byte instruction to allow MT safe patching to non-entrant
    __ fat_nop();
  }

  // Generate a new frame for the wrapper.
  __ enter();
  // -2 because return address is already present and so is saved rbp
  __ subptr(rsp, stack_size - 2*wordSize);

  // Frame is now completed as far as size and linkage.
  int frame_complete = ((intptr_t)__ pc()) - start;

  if (UseRTMLocking) {
    // Abort RTM transaction before calling JNI
    // because critical section will be large and will be
    // aborted anyway. Also nmethod could be deoptimized.
    __ xabort(0);
  }

  // Calculate the difference between rsp and rbp. We need to know it
  // after the native call because on windows Java Natives will pop
  // the arguments and it is painful to do rsp relative addressing
  // in a platform independent way. So after the call we switch to
  // rbp relative addressing.

  int fp_adjustment = stack_size - 2*wordSize;

#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // Compute the rbp offset for any slots used after the jni call

  int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;

  // We use rdi as a thread pointer because it is callee save and
  // if we load it once it is usable thru the entire wrapper
  const Register thread = rdi;

  // We use rsi as the oop handle for the receiver/klass
  // It is callee save so it survives the call to native

  const Register oop_handle_reg = rsi;

  __ get_thread(thread);

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // and, if static, the class mirror instead of a receiver.  This pretty much
  // guarantees that register layout will not match (and x86 doesn't use reg
  // parms though amd does).  Since the native abi doesn't use register args
  // and the java convention does we don't have to worry about collisions.
  // All of our moves are reg->stack or stack->stack.
  // We ignore the extra arguments during the shuffle and handle them at the
  // last moment.  The shuffle is described by the two calling convention
  // vectors we have in our possession.  We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.

  int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1);
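  // That is, the first java argument maps to c arg 1 (after JNIEnv*) for
  // instance methods, to c arg 2 (after JNIEnv* and jclass) for static ones,
  // and to c arg 0 for critical natives, which take no hidden arguments.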

  // Record rsp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // This is a trick. We double the stack slots so we can claim
  // the oops in the caller's frame. Since we are sure to have
  // more args than the caller, doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  // Mark location of rbp,
  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());

  // We know that we only have args in at most two integer registers (rcx, rdx),
  // so rax and rbx are free to use as temporaries if we have to do stack to
  // stack moves. All inbound args are referenced based on rbp, and all
  // outbound args via rsp.

  for (int i = 0; i < total_in_args; i++, c_arg++) {
    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
          c_arg++;
          break;
        }
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_LONG :
        long_move(masm, in_regs[i], out_regs[c_arg]);
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        simple_move32(masm, in_regs[i], out_regs[c_arg]);
    }
  }

  // Pre-load a static method's oop into rsi.  Used both by locking code and
  // the normal JNI call code.
  if (method->is_static() && !is_critical_native) {

    // load oop into a register
    __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));

    // Now handlize the static class mirror; it's known not-null.
    __ movptr(Address(rsp, klass_offset), oop_handle_reg);
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

    // Now get the handle
    __ lea(oop_handle_reg, Address(rsp, klass_offset));
    // store the klass handle as second argument
    __ movptr(Address(rsp, wordSize), oop_handle_reg);
  }

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  // We use the same pc/oopMap repeatedly when we call out

  intptr_t the_pc = (intptr_t) __ pc();
  oop_maps->add_gc_map(the_pc - start, map);

  __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);


  // We have all of the arguments setup at this point. We must not touch any
  // register argument registers from here on (what if we were to save/restore
  // them? there are no oops to update).

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
    __ mov_metadata(rax, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
         thread, rax);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    __ mov_metadata(rax, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
         thread, rax);
  }

  // These are register definitions we need for locking/unlocking
  const Register swap_reg = rax;  // Must use rax, for cmpxchg instruction
  const Register obj_reg  = rcx;  // Will contain the oop
  const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)

  Label slow_path_lock;
  Label lock_done;

  // Lock a synchronized method
  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");


    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ movptr(oop_handle_reg, Address(rsp, wordSize));

    // Get address of the box

    __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));

    // Load the oop from the handle
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      // Note that oop_handle_reg is trashed during this call
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
    }

    // Load immediate 1 into swap_reg %rax,
    __ movptr(swap_reg, 1);

    // Load (object->mark() | 1) into swap_reg %rax,
    __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

    // src -> dest iff dest == rax, else rax, <- dest
    // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
    __ lock();
    __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ jcc(Assembler::equal, lock_done);

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) rsp <= mark < mark + os::pagesize()
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant 2 bits clear.
    // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
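    // For example, with a 4096-byte page the mask (3 - 4096) is 0xfffff003,
    // so the result is zero exactly when (mark - rsp) has its low two bits
    // clear and is smaller than one page, i.e. mark is a plausible address
    // of a lock in our own stack frame (the recursive-lock case).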

    __ subptr(swap_reg, rsp);
    __ andptr(swap_reg, 3 - os::vm_page_size());

    // Save the test result, for recursive case, the result is zero
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
    __ jcc(Assembler::notEqual, slow_path_lock);
    // Slow path will re-enter here
    __ bind(lock_done);

    if (UseBiasedLocking) {
      // Re-fetch oop_handle_reg as we trashed it above
      __ movptr(oop_handle_reg, Address(rsp, wordSize));
    }
  }


  // Finally just about ready to make the JNI call


  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
    __ movptr(Address(rsp, 0), rdx);
  }

  // Now set thread in native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);

  __ call(RuntimeAddress(native_func));

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // WARNING - on Windows Java Natives use the pascal calling convention and
  // pop the arguments off of the stack. We could just re-adjust the stack
  // pointer here and continue to do SP relative addressing but we instead
  // switch to FP relative addressing.

  // Unpack native results.
  switch (ret_type) {
    case T_BOOLEAN: __ c2bool(rax);            break;
    case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
    case T_BYTE   : __ sign_extend_byte(rax);  break;
    case T_SHORT  : __ sign_extend_short(rax); break;
    case T_INT    : /* nothing to do */        break;
    case T_DOUBLE :
    case T_FLOAT  :
      // Result is in st0 we'll save as needed
      break;
    case T_ARRAY:   // Really a handle
    case T_OBJECT:  // Really a handle
      break;  // can't de-handlize until after safepoint check
    case T_VOID: break;
    case T_LONG: break;
    default    : ShouldNotReachHere();
  }

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
  //     VM thread changes sync state to synchronizing and suspends threads for GC.
  //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

  // Force this write out before the read below
  __ membar(Assembler::Membar_mask_bits(
              Assembler::LoadLoad | Assembler::LoadStore |
              Assembler::StoreLoad | Assembler::StoreStore));
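  // (On x86 only the StoreLoad component requires an actual fence under the
  // TSO memory model; the other bits compile to nothing here.)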

  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  Label after_transition;

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue, slow_path;

    __ safepoint_poll(slow_path, thread, noreg);

    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(slow_path);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here preventing us from clearing _last_native_pc down below.
    // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
    //
    __ vzeroupper();

    save_native_result(masm, ret_type, stack_slots);
    __ push(thread);
    if (!is_critical_native) {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                              JavaThread::check_special_condition_for_native_trans)));
    } else {
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                              JavaThread::check_special_condition_for_native_trans_and_transition)));
    }
    __ increment(rsp, wordSize);
    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic below.
      __ jmpb(after_transition);
    }

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
  __ bind(after_transition);

  Label reguard;
  Label reguard_done;
  __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
  __ jcc(Assembler::equal, reguard);

  // slow path reguard re-enters here
  __ bind(reguard_done);

  // Handle possible exception (will unlock if necessary)

  // native result if any is live

  // Unlock
  Label slow_path_unlock;
  Label unlock_done;
  if (method->is_synchronized()) {

    Label done;

    // Get locked oop from the handle we passed to jni
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, rbx, done);
    }

    // Simple recursive lock?

    __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, done);

    // Must save rax, if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    // get old displaced header
    __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));

    // get address of the stack lock
    __ lea(rax, Address(rbp, lock_slot_rbp_offset));

    // Atomic swap old header if oop still contains the stack lock
    // src -> dest iff dest == rax, else rax, <- dest
    // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
    __ lock();
    __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ jcc(Assembler::notEqual, slow_path_unlock);

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);

  }

  {
    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
    // Tell dtrace about this method exit
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(rax, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
         thread, rax);
    restore_native_result(masm, ret_type, stack_slots);
  }

  // We can finally stop using that last_Java_frame we setup ages ago

  __ reset_last_Java_frame(thread, false);

  // Unbox oop result, e.g. JNIHandles::resolve value.
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    __ resolve_jobject(rax /* value */,
                       thread /* thread */,
                       rcx /* tmp */);
  }

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
  }

  if (!is_critical_native) {
    // reset handle block
    __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
    __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

    // Any exception pending?
    __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, exception_pending);
  }

  // no exception, we're almost done

  // check that only result value is on FPU stack
  __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");

  // Fixup floating point results so that the result looks like a return from a compiled method
  if (ret_type == T_FLOAT) {
    if (UseSSE >= 1) {
      // Pop st0 and store as float and reload into xmm register
      __ fstp_s(Address(rbp, -4));
      __ movflt(xmm0, Address(rbp, -4));
    }
  } else if (ret_type == T_DOUBLE) {
    if (UseSSE >= 2) {
      // Pop st0 and store as double and reload into xmm register
      __ fstp_d(Address(rbp, -8));
      __ movdbl(xmm0, Address(rbp, -8));
    }
  }

  // Return

  __ leave();
  __ ret(0);

  // Unexpected paths are out of line and go here

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    // BEGIN Slow path lock

    __ bind(slow_path_lock);

    // has last_Java_frame setup. No exceptions so do a vanilla call, not call_VM
    // args are (oop obj, BasicLock* lock, JavaThread* thread)
    __ push(thread);
    __ push(lock_reg);
    __ push(obj_reg);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
    __ addptr(rsp, 3*wordSize);

#ifdef ASSERT
    { Label L;
      __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("no pending exception allowed on exit from monitorenter");
      __ bind(L);
    }
#endif
    __ jmp(lock_done);

    // END Slow path lock

    // BEGIN Slow path unlock
    __ bind(slow_path_unlock);
    __ vzeroupper();
    // Slow path unlock

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
      save_native_result(masm, ret_type, stack_slots);
    }
    // Save pending exception around call to VM (which contains an EXCEPTION_MARK)

    __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
    __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);


    // should be a peal
    // +wordSize because of the push above
    // args are (oop obj, BasicLock* lock, JavaThread* thread)
    __ push(thread);
    __ lea(rax, Address(rbp, lock_slot_rbp_offset));
    __ push(rax);

    __ push(obj_reg);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
    __ addptr(rsp, 3*wordSize);
#ifdef ASSERT
    {
      Label L;
      __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
      __ bind(L);
    }
#endif /* ASSERT */

    __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
      restore_native_result(masm, ret_type, stack_slots);
    }
    __ jmp(unlock_done);
    // END Slow path unlock

  }

  // SLOW PATH Reguard the stack if needed

  __ bind(reguard);
  __ vzeroupper();
  save_native_result(masm, ret_type, stack_slots);
  {
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
  }
  restore_native_result(masm, ret_type, stack_slots);
  __ jmp(reguard_done);


  // BEGIN EXCEPTION PROCESSING

  if (!is_critical_native) {
    // Forward the exception
    __ bind(exception_pending);

    // remove possible return value from FPU register stack
    __ empty_FPU_stack();

    // pop our frame
    __ leave();
    // and forward the exception
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  }

  __ flush();

  nmethod* nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;

}

// this function returns the adjusted size (in number of words) of a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
}
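// For instance, a callee with 2 parameters and 5 locals needs
// (5 - 2) * Interpreter::stackElementWords extra words when its interpreter
// frame is rebuilt during deoptimization.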


uint SharedRuntime::out_preserve_stack_slots() {
  return 0;
}

//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // note: the buffer code size must account for StackShadowPages=50
  CodeBuffer   buffer("deopt_blob", 1536, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_in_words;
  OopMap* map = NULL;
  // Account for the extra args we place on the stack
  // by the time we call fetch_unroll_info
  const int additional_words = 2;  // deopt kind, thread

  OopMapSet* oop_maps = new OopMapSet();

  // -------------
  // This code enters when returning to a de-optimized nmethod.  A return
  // address has been pushed on the stack, and return values are in
  // registers.
  // If we are doing a normal deopt then we were called from the patched
  // nmethod from the point we returned to the nmethod. So the return
  // address on the stack is wrong by NativeCall::instruction_size
  // We will adjust the value so it looks like we have the original return
  // address on the stack (like when we eagerly deoptimized).
  // In the case of an exception pending when deoptimizing, we enter
  // with a return address on the stack that points after the call we patched
  // into the exception handler. We have the following register state:
  //    rax,: exception
  //    rbx,: exception handler
  //    rdx: throwing pc
  // So in this case we simply jam rdx into the useless return address and
  // the stack looks just like we want.
  //
  // At this point we need to de-opt.  We save the argument return
  // registers.  We call the first C routine, fetch_unroll_info().  This
  // routine captures the return values and returns a structure which
  // describes the current frame size and the sizes of all replacement frames.
  // The current frame is compiled code and may contain many inlined
  // functions, each with their own JVM state.  We pop the current frame, then
  // push all the new frames.  Then we call the C routine unpack_frames() to
  // populate these frames.  Finally unpack_frames() returns us the new target
  // address.  Notice that callee-save registers are BLOWN here; they have
  // already been captured in the vframeArray at the time the return PC was
  // patched.
  address start = __ pc();
  Label cont;

  // Prolog for non exception case!

  // Save everything in sight.

  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
  // Normal deoptimization
  __ push(Deoptimization::Unpack_deopt);
  __ jmp(cont);

  int reexecute_offset = __ pc() - start;

  // Reexecute case
  // return address is the pc that describes what bci to re-execute at

  // No need to update map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);

  __ push(Deoptimization::Unpack_reexecute);
  __ jmp(cont);

  int exception_offset = __ pc() - start;

  // Prolog for exception case

  // all registers are dead at this entry point, except for rax, and
  // rdx which contain the exception oop and exception pc
  // respectively.  Set them in TLS and fall thru to the
  // unpack_with_exception_in_tls entry point.

  __ get_thread(rdi);
  __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
  __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);

  int exception_in_tls_offset = __ pc() - start;

  // new implementation because exception oop is now passed in JavaThread

  // Prolog for exception case
  // All registers must be preserved because they might be used by LinearScan
  // Exception oop and throwing PC are passed in JavaThread
  // tos: stack at point of call to method that threw the exception (i.e. only
  // args are on the stack, no return address)

  // make room on stack for the return address
  // It will be patched later with the throwing pc. The correct value is not
  // available now because loading it from memory would destroy registers.
  __ push(0);

  // Save everything in sight.

  // No need to update map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);

  // Now it is safe to overwrite any register

  // store the correct deoptimization type
  __ push(Deoptimization::Unpack_exception);

  // load throwing pc from JavaThread and patch it as the return address
  // of the current frame. Then clear the field in JavaThread
  __ get_thread(rdi);
  __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
  __ movptr(Address(rbp, wordSize), rdx);
  __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);

#ifdef ASSERT
  // verify that there is really an exception oop in JavaThread
  __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
  __ verify_oop(rax);

  // verify that there is no pending exception
  Label no_pending_exception;
  __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, no_pending_exception);
  __ stop("must not have pending exception here");
  __ bind(no_pending_exception);
#endif

  __ bind(cont);

  // Compiled code leaves the floating point stack dirty, empty it.
  __ empty_FPU_stack();


  // Call C code.  Need thread and this frame, but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.
  __ get_thread(rcx);
  __ push(rcx);
  // fetch_unroll_info needs to call last_java_frame()
  __ set_last_Java_frame(rcx, noreg, noreg, NULL);

  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));

  // Need to have an oopmap that tells fetch_unroll_info where to
  // find any register it might need.

  oop_maps->add_gc_map( __ pc()-start, map);

  // Discard args to fetch_unroll_info
  __ pop(rcx);
  __ pop(rcx);

  __ get_thread(rcx);
  __ reset_last_Java_frame(rcx, false);

  // Load UnrollBlock into EDI
  __ mov(rdi, rax);

  // Move the unpack kind to a safe place in the UnrollBlock because
  // we are very short of registers

  Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
  // retrieve the deopt kind from the UnrollBlock.
  __ movl(rax, unpack_kind);

  Label noException;
  __ cmpl(rax, Deoptimization::Unpack_exception);  // Was exception pending?
  __ jcc(Assembler::notEqual, noException);
  __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
  __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);

  __ verify_oop(rax);

  // Overwrite the result registers with the exception results.
  __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
  __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);

  __ bind(noException);

  // Stack is back to only having register save data on the stack.
  // Now restore the result registers. Everything else is either dead or captured
  // in the vframeArray.

  RegisterSaver::restore_result_registers(masm);

  // Non standard control word may be leaked out through a safepoint blob, and we can
  // deopt at a poll point with the non standard control word. However, we should make
  // sure the control word is correct after restore_result_registers.
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));

  // All of the register save area has been popped off the stack. Only the
  // return address remains.

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).
  //
  // Note: by leaving the return address of self-frame on the stack
  // and using the size of frame 2 to adjust the stack
  // when we are done the return to frame 3 will still be on the stack.

  // Pop deoptimized frame
  __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));

  // sp should be pointing at the return address to the caller (3)

  // Pick up the initial fp we should save
  // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));

#ifdef ASSERT
  // Compilers generate code that bang the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non product builds.
  if (UseStackBanging) {
    __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(rbx, rcx);
  }
#endif

  // Load array of frame pcs into ECX
  __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  __ pop(rsi);  // trash the old pc

  // Load array of frame sizes into ESI
  __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));

  Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());

  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
  __ movl(counter, rbx);

  // Now adjust the caller's stack to make up for the extra locals
  // but record the original sp so that we can save it in the skeletal interpreter
  // frame and the stack walking of interpreter_sender will get the unextended sp
  // value and not the "real" sp value.

  Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
  __ movptr(sp_temp, rsp);
  __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
  __ subptr(rsp, rbx);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ movptr(rbx, Address(rsi, 0));  // Load frame size
  __ subptr(rbx, 2*wordSize);       // we'll push pc and rbp, by hand
  __ pushptr(Address(rcx, 0));      // save return address
  __ enter();                       // save old & set new rbp,
  __ subptr(rsp, rbx);              // Prolog!
  __ movptr(rbx, sp_temp);          // sender's sp
  // This value is corrected by layout_activation_impl
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx);  // Make it walkable
  __ movptr(sp_temp, rsp);          // pass to next frame
  __ addptr(rsi, wordSize);         // Bump array pointer (sizes)
  __ addptr(rcx, wordSize);         // Bump array pointer (pcs)
  __ decrementl(counter);           // decrement counter
  __ jcc(Assembler::notZero, loop);
  __ pushptr(Address(rcx, 0));      // save final return address

  // Re-push self-frame
  __ enter();                       // save old & set new rbp,

  // Return address and rbp, are in place
  // We'll push additional args later. Just allocate a full sized
  // register save area
  __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);

  // Restore frame locals after moving the frame
  __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
  __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
  __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));  // Pop float stack and store in local
  if (UseSSE >= 2) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
  if (UseSSE == 1) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);

  // Set up the args to unpack_frame

  __ pushl(unpack_kind);  // get the unpack_kind value
  __ get_thread(rcx);
  __ push(rcx);

  // set last_Java_sp, last_Java_fp
  __ set_last_Java_frame(rcx, noreg, rbp, NULL);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  // Set an oopmap for the call site
  oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));

  // rax, contains the return result type
  __ push(rax);

  __ get_thread(rcx);
  __ reset_last_Java_frame(rcx, false);

  // Collect return values
  __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
  __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));

  // Clear floating point stack before returning to interpreter
  __ empty_FPU_stack();

  // Check if we should push the float or double return value.
  Label results_done, yes_double_value;
  __ cmpl(Address(rsp, 0), T_DOUBLE);
  __ jcc (Assembler::zero, yes_double_value);
  __ cmpl(Address(rsp, 0), T_FLOAT);
  __ jcc (Assembler::notZero, results_done);

  // return float value as expected by interpreter
  if (UseSSE >= 1) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
  else             __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
  __ jmp(results_done);

  // return double value as expected by interpreter
  __ bind(yes_double_value);
  if (UseSSE >= 2) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
  else             __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));

  __ bind(results_done);

  // Pop self-frame.
  __ leave();  // Epilog!

  // Jump to interpreter
  __ ret(0);

  // -------------
  // make sure all code is generated
  masm->flush();

  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}
2740
2741
2742 #ifdef COMPILER2
2743 //------------------------------generate_uncommon_trap_blob--------------------
generate_uncommon_trap_blob()2744 void SharedRuntime::generate_uncommon_trap_blob() {
2745 // allocate space for the code
2746 ResourceMark rm;
2747 // setup code generation tools
2748 CodeBuffer buffer("uncommon_trap_blob", 512, 512);
2749 MacroAssembler* masm = new MacroAssembler(&buffer);
2750
2751 enum frame_layout {
2752 arg0_off, // thread sp + 0 // Arg location for
2753 arg1_off, // unloaded_class_index sp + 1 // calling C
2754 arg2_off, // exec_mode sp + 2
2755 // The frame sender code expects that rbp will be in the "natural" place and
2756 // will override any oopMap setting for it. We must therefore force the layout
2757 // so that it agrees with the frame sender code.
2758 rbp_off, // callee saved register sp + 3
2759 return_off, // slot for return address sp + 4
2760 framesize
2761 };
2762
2763 address start = __ pc();
2764
2765 if (UseRTMLocking) {
2766 // Abort RTM transaction before possible nmethod deoptimization.
2767 __ xabort(0);
2768 }
2769
2770 // Push self-frame.
2771 __ subptr(rsp, return_off*wordSize); // Epilog!
2772
2773 // rbp, is an implicitly saved callee saved register (i.e. the calling
2774 // convention will save restore it in prolog/epilog) Other than that
2775 // there are no callee save registers no that adapter frames are gone.
2776 __ movptr(Address(rsp, rbp_off*wordSize), rbp);
2777
2778 // Clear the floating point exception stack
2779 __ empty_FPU_stack();
2780
2781 // set last_Java_sp
2782 __ get_thread(rdx);
2783 __ set_last_Java_frame(rdx, noreg, noreg, NULL);
2784
2785 // Call C code. Need thread but NOT official VM entry
2786 // crud. We cannot block on this call, no GC can happen. Call should
2787 // capture callee-saved registers as well as return values.
2788 __ movptr(Address(rsp, arg0_off*wordSize), rdx);
2789 // argument already in ECX
2790 __ movl(Address(rsp, arg1_off*wordSize),rcx);
2791 __ movl(Address(rsp, arg2_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2792 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2793
2794 // Set an oopmap for the call site
2795 OopMapSet *oop_maps = new OopMapSet();
2796 OopMap* map = new OopMap( framesize, 0 );
2797 // No oopMap for rbp, it is known implicitly
2798
2799 oop_maps->add_gc_map( __ pc()-start, map);
2800
2801 __ get_thread(rcx);
2802
2803 __ reset_last_Java_frame(rcx, false);
2804
2805 // Load UnrollBlock into EDI
2806 __ movptr(rdi, rax);
2807
2808 #ifdef ASSERT
2809 { Label L;
2810 __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
2811 (int32_t)Deoptimization::Unpack_uncommon_trap);
2812 __ jcc(Assembler::equal, L);
2813 __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
2814 __ bind(L);
2815 }
2816 #endif
2817
2818 // Pop all the frames we must move/replace.
2819 //
2820 // Frame picture (youngest to oldest)
2821 // 1: self-frame (no frame link)
2822 // 2: deopting frame (no frame link)
2823 // 3: caller of deopting frame (could be compiled/interpreted).
2824
2825 // Pop self-frame. We have no frame, and must rely only on EAX and ESP.
2826 __ addptr(rsp,(framesize-1)*wordSize); // Epilog!
2827
2828 // Pop deoptimized frame
2829 __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2830 __ addptr(rsp, rcx);
2831
2832 // sp should be pointing at the return address to the caller (3)
2833
2834 // Pick up the initial fp we should save
2835 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2836 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2837
2838 #ifdef ASSERT
2839 // Compilers generate code that bang the stack by as much as the
2840 // interpreter would need. So this stack banging should never
2841 // trigger a fault. Verify that it does not on non product builds.
2842 if (UseStackBanging) {
2843 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2844 __ bang_stack_size(rbx, rcx);
2845 }
2846 #endif
2847
2848 // Load array of frame pcs into ECX
2849 __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2850
2851 __ pop(rsi); // trash the pc
2852
2853 // Load array of frame sizes into ESI
2854 __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2855
2856 Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2857
2858 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2859 __ movl(counter, rbx);
2860
2861 // Now adjust the caller's stack to make up for the extra locals
2862 // but record the original sp so that we can save it in the skeletal interpreter
2863 // frame and the stack walking of interpreter_sender will get the unextended sp
2864 // value and not the "real" sp value.
2865
2866 Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2867 __ movptr(sp_temp, rsp);
2868 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2869 __ subptr(rsp, rbx);
2870
2871 // Push interpreter frames in a loop
2872 Label loop;
2873 __ bind(loop);
2874 __ movptr(rbx, Address(rsi, 0)); // Load frame size
2875 __ subptr(rbx, 2*wordSize); // we'll push pc and rbp, by hand
2876 __ pushptr(Address(rcx, 0)); // save return address
2877 __ enter(); // save old & set new rbp,
2878 __ subptr(rsp, rbx); // Prolog!
2879 __ movptr(rbx, sp_temp); // sender's sp
2880 // This value is corrected by layout_activation_impl
2881 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
2882 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2883 __ movptr(sp_temp, rsp); // pass to next frame
2884 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
2885 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
2886 __ decrementl(counter); // decrement counter
2887 __ jcc(Assembler::notZero, loop);
2888 __ pushptr(Address(rcx, 0)); // save final return address
2889
2890 // Re-push self-frame
2891 __ enter(); // save old & set new rbp,
2892 __ subptr(rsp, (framesize-2) * wordSize); // Prolog!
2893
2894
2895 // set last_Java_sp, last_Java_fp
2896 __ get_thread(rdi);
2897 __ set_last_Java_frame(rdi, noreg, rbp, NULL);
2898
2899 // Call C code. Need thread but NOT official VM entry
2900 // crud. We cannot block on this call, no GC can happen. Call should
2901 // restore return values to their stack-slots with the new SP.
  __ movptr(Address(rsp, arg0_off*wordSize), rdi);
  __ movl(Address(rsp, arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  // Set an oopmap for the call site
  oop_maps->add_gc_map(__ pc() - start, new OopMap(framesize, 0));

  __ get_thread(rdi);
  __ reset_last_Java_frame(rdi, true);

  // Pop self-frame.
  __ leave(); // Epilog!

  // Jump to interpreter
  __ ret(0);

  // -------------
  // make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
}
#endif // COMPILER2

//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// sets up an oopmap, and calls safepoint code to stop the compiled
// code at a safepoint.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {

  // Account for thread arg in our frame
  const int additional_words = 1;
  int frame_size_in_words;

  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  ResourceMark rm;
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map;

  // allocate space for the code
  // setup code generation tools
  CodeBuffer buffer("handler_blob", 1024, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  const Register java_thread = rdi; // callee-saved for VC++
  address start = __ pc();
  address call_pc = NULL;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

  if (UseRTMLocking) {
    // Abort RTM transaction before calling runtime,
    // because the critical section will be large and will be
    // aborted anyway. Also the nmethod could be deoptimized.
    __ xabort(0);
  }

  // If cause_return is true we are at a poll_return, and the return address
  // to the caller of the nmethod being safepointed is already on the stack.
  // We can leave this return address on the stack and effectively complete
  // the return and safepoint in the caller.
  // Otherwise we push space for a return address that the safepoint
  // handler will install later to make the stack walking sensible.
  if (!cause_return) {
    __ push(rbx); // Make room for return address (or push it again)
  }
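
  // Sketch of the two layouts just before save_live_registers() (informal;
  // stack grows downward):
  //
  //   cause_return == true          cause_return == false
  //   [ return address (real) ]     [ rbx placeholder slot, patched  ]
  //   [ saved registers ...   ]     [   below from saved_exception_pc ]
  //                                 [ saved registers ...            ]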

  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);

  // The following is basically a call_VM. However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  // Push thread argument and setup last_Java_sp
  __ get_thread(java_thread);
  __ push(java_thread);
  __ set_last_Java_frame(java_thread, noreg, noreg, NULL);

  // If this was not a poll_return then we need to correct the return address now.
  if (!cause_return) {
    // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
    // Additionally, rbx is a callee-saved register, so we can look at it later to determine
    // if someone changed the return address for us!
    __ movptr(rbx, Address(java_thread, JavaThread::saved_exception_pc_offset()));
    __ movptr(Address(rbp, wordSize), rbx);
  }

  // do the call
  __ call(RuntimeAddress(call_ptr));

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  oop_maps->add_gc_map(__ pc() - start, map);

  // Discard arg
  __ pop(rcx);

  Label noException;

  // Clear last_Java_sp again
  __ get_thread(java_thread);
  __ reset_last_Java_frame(java_thread, false);

  __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, noException);

  // Exception pending
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  __ bind(noException);

  Label no_adjust, bail, not_special;
  if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
    // If our stashed return pc was modified by the runtime we avoid touching it
    __ cmpptr(rbx, Address(rbp, wordSize));
    __ jccb(Assembler::notEqual, no_adjust);

    // Skip over the poll instruction.
    // See NativeInstruction::is_safepoint_poll()
    // Possible encodings:
    // 85 00       test %eax,(%rax)
    // 85 01       test %eax,(%rcx)
    // 85 02       test %eax,(%rdx)
    // 85 03       test %eax,(%rbx)
    // 85 06       test %eax,(%rsi)
    // 85 07       test %eax,(%rdi)
    //
    // 85 04 24    test %eax,(%rsp)
    // 85 45 00    test %eax,0x0(%rbp)

#ifdef ASSERT
    __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
#endif
    // rsp/rbp base encoding takes 3 bytes with the following register values:
    // rsp 0x04
    // rbp 0x05
    __ movzbl(rcx, Address(rbx, 1));
    __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
    __ subptr(rcx, 4);    // looking for 0x00 .. 0x01
    __ cmpptr(rcx, 1);
    __ jcc(Assembler::above, not_special);
    __ addptr(rbx, 1);
    __ bind(not_special);
#ifdef ASSERT
    // Verify the correct encoding of the poll we're about to skip.
    __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
    __ jcc(Assembler::notEqual, bail);
    // Mask out the modrm bits
    __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
    // rax encodes to 0, so if the bits are nonzero it's incorrect
    __ jcc(Assembler::notZero, bail);
#endif
    // Adjust return pc forward to step over the safepoint poll instruction
    __ addptr(rbx, 2);
    __ movptr(Address(rbp, wordSize), rbx);
  }
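
  // The adjustment above computes the poll instruction length in generated
  // code; as a C sketch (illustrative only, see also
  // NativeInstruction::is_safepoint_poll()):
  //
  //   int poll_length(const unsigned char* pc) {
  //     int base = pc[1] & 0x07;  // modrm base register bits
  //     // rsp (0x04) needs a SIB byte and rbp (0x05) a disp8: 3 bytes
  //     return (base == 0x04 || base == 0x05) ? 3 : 2;
  //   }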

  __ bind(no_adjust);
  // Normal exit, register restoring and exit
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ ret(0);

#ifdef ASSERT
  __ bind(bail);
  __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
#endif

  // make sure all code is generated
  masm->flush();

  // Fill-out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a Java call. All the argument registers are live at this point,
// but since this is generic code we don't know what they are and the caller
// must do any GC of the args.
//
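// In outline, the stub generated below behaves like the following pseudocode
// (illustrative only; `destination` is a VM entry such as
// SharedRuntime::resolve_static_call_C):
//
//   save_live_registers();
//   address entry = destination(thread);   // may safepoint / throw
//   if (thread->has_pending_exception()) {
//     restore_live_registers();
//     goto forward_exception_entry;
//   }
//   saved.rbx = thread->vm_result_2();     // the resolved Method*
//   saved.rax = entry;                     // where we will jump
//   restore_live_registers();              // reloads rax and rbx
//   goto entry;
//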
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_words;
  enum frame_layout {
    thread_off,
    extra_words };

  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);

  int frame_complete = __ offset();

  const Register thread = rdi;
  __ get_thread(rdi);

  __ push(thread);
  __ set_last_Java_frame(thread, noreg, rbp, NULL);

  __ call(RuntimeAddress(destination));

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  // rax contains the address we are going to jump to assuming no exception was installed

  __ addptr(rsp, wordSize);

  // clear last_Java_sp
  __ reset_last_Java_frame(thread, true);
  // check for pending exceptions
  Label pending;
  __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, pending);

  // get the returned Method*
  __ get_vm_result_2(rbx, thread);
  __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);

  __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
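
  // Note: writing rbx and rax into their slots in the register save area
  // means restore_live_registers() below reloads them with the resolved
  // Method* and entry point rather than the values live on entry.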

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ jmp(rax);

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ get_thread(thread);
  __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
  __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob (frame size is given in words)
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
