/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class RegisterSaver {
  // Capture info about frame layout
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
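  // For illustration, DEF_XMM_OFFS(0) expands (with BytesPerInt == 4) to:
  //   xmm0_off = xmm_off + 0*16/4, xmm0H_off
  // so each XMM register takes 16 bytes (four 4-byte slots) in the save area,
  // with the _off/H_off pair naming the first two of those slots.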
  enum layout {
    fpu_state_off = 0,
    fpu_state_end = fpu_state_off+FPUStateSizeInWords,
    st0_off, st0H_off,
    st1_off, st1H_off,
    st2_off, st2H_off,
    st3_off, st3H_off,
    st4_off, st4H_off,
    st5_off, st5H_off,
    st6_off, st6H_off,
    st7_off, st7H_off,
    xmm_off,
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    DEF_XMM_OFFS(2),
    DEF_XMM_OFFS(3),
    DEF_XMM_OFFS(4),
    DEF_XMM_OFFS(5),
    DEF_XMM_OFFS(6),
    DEF_XMM_OFFS(7),
    flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
    rdi_off,
    rsi_off,
    ignore_off,  // extra copy of rbp
    rsp_off,
    rbx_off,
    rdx_off,
    rcx_off,
    rax_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off,
    return_off,  // slot for return address
    reg_save_size };
  enum { FPU_regs_live = flags_off - fpu_state_end };
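
  // A sketch of the save-area layout this enum describes, from high to low
  // addresses (offsets are in 4-byte slots):
  //   return_off             return address (pushed by the caller)
  //   rbp_off                rbp pushed by enter()
  //   rax_off .. rdi_off     GPRs pushed by pusha() (includes an extra rbp copy)
  //   flags_off              EFLAGS pushed by pushf()
  //   xmm7H_off .. xmm0_off  XMM registers, 16 bytes each
  //   st7H_off .. st0_off    x87 registers, 8 bytes each
  //   fpu_state_off          FPU state saved by push_FPU_state()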

 public:

  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
                                     int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  static int rax_offset() { return rax_off; }
  static int rbx_offset() { return rbx_off; }

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int raxOffset(void) { return rax_off; }
  static int rdxOffset(void) { return rdx_off; }
  static int rbxOffset(void) { return rbx_off; }
  static int xmm0Offset(void) { return xmm0_off; }
  // This really returns a slot in the fp save area; which one is not important
  static int fpResultOffset(void) { return st0_off; }

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);

};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
                                           int* total_frame_words, bool verify_fpu, bool save_vectors) {
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  int ymm_bytes = num_xmm_regs * 16;
  int zmm_bytes = num_xmm_regs * 32;
#ifdef COMPILER2
  if (save_vectors) {
    assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
    assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
    // Save upper half of YMM registers
    int vect_bytes = ymm_bytes;
    if (UseAVX > 2) {
      // Save upper half of ZMM registers as well
      vect_bytes += zmm_bytes;
    }
    additional_frame_words += vect_bytes / wordSize;
  }
#else
  assert(!save_vectors, "vectors are generated only by C2");
#endif
  int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
  int frame_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_words;

  assert(FPUStateSizeInWords == 27, "update stack layout");

  // Save registers, fpu state, and flags.
  // We assume the caller already has the return address slot on the stack.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address, like a normal enter, and we want
  // to use pusha. We push by hand instead of using push.
  __ enter();
  __ pusha();
  __ pushf();
  __ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
  __ push_FPU_state();          // Save FPU state & init

  if (verify_fpu) {
    // Some stubs may have non-standard FPU control word settings so
    // only check and reset the value when it is required to be the
    // standard value. The safepoint blob in particular can be used
    // in methods which are using the 24-bit control word for
    // optimized float math.

#ifdef ASSERT
    // Make sure the control word has the expected value
    Label ok;
    __ cmpw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
    __ jccb(Assembler::equal, ok);
    __ stop("corrupted control word detected");
    __ bind(ok);
#endif

    // Reset the control word to guard against exceptions being unmasked
    // since fstp_d can cause FPU stack underflow exceptions. Write it
    // into the on-stack copy and then reload that to make sure that the
    // current and future values are correct.
    __ movw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
  }

  __ frstor(Address(rsp, 0));
  if (!verify_fpu) {
    // Set the control word so that exceptions are masked for the
    // following code.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  int off = st0_off;
  int delta = st1_off - off;

  // Save the FPU registers in de-opt-able form
  for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
    __ fstp_d(Address(rsp, off*wordSize));
    off += delta;
  }

  off = xmm0_off;
  delta = xmm1_off - off;
  if (UseSSE == 1) {
    // Save the XMM state
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
      off += delta;
    }
  } else if (UseSSE >= 2) {
    // Save whole 128bit (16 bytes) XMM registers
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
      off += delta;
    }
  }

  if (save_vectors) {
    __ subptr(rsp, ymm_bytes);
    // Save upper half of YMM registers
    for (int n = 0; n < num_xmm_regs; n++) {
      __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
    }
    if (UseAVX > 2) {
      __ subptr(rsp, zmm_bytes);
      // Save upper half of ZMM registers
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
      }
    }
  }
  __ vzeroupper();

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap( frame_words, 0 );

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
#define NEXTREG(x) (x)->as_VMReg()->next()
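
  // For illustration: STACK_OFFSET(rax_off) expands to
  //   VMRegImpl::stack2reg(rax_off + additional_frame_words)
  // i.e. the enum offsets are biased by any extra words sitting above the
  // save area, and NEXTREG(r) names the VMReg for the upper half of a
  // two-slot register.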

  map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
  // rbp location is known implicitly, no oopMap
  map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
  // %%% This is really a waste but we'll keep things as they were for now for the upper component
  off = st0_off;
  delta = st1_off - off;
  for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
    FloatRegister freg_name = as_FloatRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
    map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
    off += delta;
  }
  off = xmm0_off;
  delta = xmm1_off - off;
  for (int n = 0; n < num_xmm_regs; n++) {
    XMMRegister xmm_name = as_XMMRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
    map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
    off += delta;
  }
#undef NEXTREG
#undef STACK_OFFSET

  return map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  int ymm_bytes = num_xmm_regs * 16;
  int zmm_bytes = num_xmm_regs * 32;
  // Recover XMM & FPU state
  int additional_frame_bytes = 0;
#ifdef COMPILER2
  if (restore_vectors) {
    assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
    assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
    // Upper half of YMM registers was saved
    additional_frame_bytes = ymm_bytes;
    if (UseAVX > 2) {
      // Upper half of ZMM registers was saved as well
      additional_frame_bytes += zmm_bytes;
    }
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2");
#endif

  int off = xmm0_off;
  int delta = xmm1_off - off;

  __ vzeroupper();

  if (UseSSE == 1) {
    // Restore XMM registers
    assert(additional_frame_bytes == 0, "");
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
      off += delta;
    }
  } else if (UseSSE >= 2) {
    // Restore whole 128bit (16 bytes) XMM registers. Do this before restoring YMM and
    // ZMM because the movdqu instruction zeros the upper part of the XMM register.
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
      off += delta;
    }
  }

  if (restore_vectors) {
    off = additional_frame_bytes - ymm_bytes;
    // Restore upper half of YMM registers.
    for (int n = 0; n < num_xmm_regs; n++) {
      __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16+off));
    }

    if (UseAVX > 2) {
      // Restore upper half of ZMM registers.
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
      }
    }
    __ addptr(rsp, additional_frame_bytes);
  }

  __ pop_FPU_state();
  __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers

  __ popf();
  __ popa();
  // Get the rbp described implicitly by the frame sender code (no oopMap)
  __ pop(rbp);
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result registers. Only used by deoptimization. By
  // now any callee-save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration, so only result registers need to be restored here.
  //

  __ frstor(Address(rsp, 0));      // Restore fpu state

  // Recover XMM & FPU state
  if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
  } else if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
  }
  __ movptr(rax, Address(rsp, rax_off*wordSize));
  __ movptr(rdx, Address(rsp, rdx_off*wordSize));
  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_off * wordSize);
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 16-byte XMM registers are saved by default using SSE2 movdqu instructions.
// Note, MaxVectorSize == 0 with UseSSE < 2 and vectors are not generated.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}

size_t SharedRuntime::trampoline_size() {
  return 16;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ jump(RuntimeAddress(destination));
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
}
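
// For illustration: with 4-byte stack slots, an incoming argument in slot 0
// lives at rbp + 8 (past the saved rbp and the return address), so
// reg2offset_in(slot 0) == (0 + 2) * 4 == 8.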

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than SharedInfo::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
// (0 up to RegisterImpl::number_of_registers) are the 32-bit
// integer registers.

// Pass first two oop/int args in registers ECX and EDX.
// Pass first two float/double args in registers XMM0 and XMM1.
// Doubles have precedence, so if you pass a mix of floats and doubles
// the doubles will grab the registers before the floats will.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64-bit build.

// ---------------------------------------------------------------------------
// The compiled Java calling convention.
// Pass first two oop/int args in registers ECX and EDX.
// Pass first two float/double args in registers XMM0 and XMM1.
// Doubles have precedence, so if you pass a mix of floats and doubles
// the doubles will grab the registers before the floats will.
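//
// For illustration (assuming UseSSE >= 2): a Java signature
// (int, double, float, int) is assigned by the code below as:
//   int    -> ECX   (first oop/int arg)
//   double -> XMM0  (doubles are claimed in a first pass, so they take
//                    XMM registers ahead of any floats)
//   float  -> XMM1  (the remaining float register)
//   int    -> EDX   (second oop/int arg)
// Anything beyond that is spilled to 4-byte stack slots, with longs and
// doubles kept aligned.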
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  uint stack = 0;          // Starting stack position for args on stack


  // Pass first two oop/int args in registers ECX and EDX.
  uint reg_arg0 = 9999;
  uint reg_arg1 = 9999;

  // Pass first two float/double args in registers XMM0 and XMM1.
  // Doubles have precedence, so if you pass a mix of floats and doubles
  // the doubles will grab the registers before the floats will.
  // CNC - TURNED OFF FOR non-SSE.
  //       On Intel we have to round all doubles (and most floats) at
  //       call sites by storing to the stack in any case.
  // UseSSE=0 ==> Don't Use ==> 9999+0
  // UseSSE=1 ==> Floats only ==> 9999+1
  // UseSSE>=2 ==> Floats or doubles ==> 9999+2
  enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
  uint fargs = (UseSSE>=2) ? 2 : UseSSE;
  uint freg_arg0 = 9999+fargs;
  uint freg_arg1 = 9999+fargs;

  // Pass doubles & longs aligned on the stack. First count stack slots for doubles
  int i;
  for( i = 0; i < total_args_passed; i++) {
    if( sig_bt[i] == T_DOUBLE ) {
      // first 2 doubles go in registers
      if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
      else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
      else // Else double is passed low on the stack to be aligned.
        stack += 2;
    } else if( sig_bt[i] == T_LONG ) {
      stack += 2;
    }
  }
  int dstack = 0;          // Separate counter for placing doubles

  // Now pick where all else goes.
  for( i = 0; i < total_args_passed; i++) {
    // From the type and the argument number (count) compute the location
    switch( sig_bt[i] ) {
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
    case T_INT:
    case T_ARRAY:
    case T_OBJECT:
    case T_ADDRESS:
      if( reg_arg0 == 9999 ) {
        reg_arg0 = i;
        regs[i].set1(rcx->as_VMReg());
      } else if( reg_arg1 == 9999 ) {
        reg_arg1 = i;
        regs[i].set1(rdx->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stack++));
      }
      break;
    case T_FLOAT:
      if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
        freg_arg0 = i;
        regs[i].set1(xmm0->as_VMReg());
      } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
        freg_arg1 = i;
        regs[i].set1(xmm1->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stack++));
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
      regs[i].set2(VMRegImpl::stack2reg(dstack));
      dstack += 2;
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
      if( freg_arg0 == (uint)i ) {
        regs[i].set2(xmm0->as_VMReg());
      } else if( freg_arg1 == (uint)i ) {
        regs[i].set2(xmm1->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(dstack));
        dstack += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  // The return value can be an odd number of VMRegImpl stack slots; make it a multiple of 2.
  return align_up(stack, 2);
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee.
  // rax isn't live, so capture the return address while we easily can.
  __ movptr(rax, Address(rsp, 0));
  __ pusha();
  __ pushf();

  if (UseSSE == 1) {
    __ subptr(rsp, 2*wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ movflt(Address(rsp, wordSize), xmm1);
  }
  if (UseSSE >= 2) {
    __ subptr(rsp, 4*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ movdbl(Address(rsp, 2*wordSize), xmm1);
  }
#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // VM needs caller's callsite
  __ push(rax);
  // VM needs target method
  __ push(rbx);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ addptr(rsp, 2*wordSize);

  if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
    __ movflt(xmm1, Address(rsp, wordSize));
    __ addptr(rsp, 2*wordSize);
  }
  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
    __ movdbl(xmm1, Address(rsp, 2*wordSize));
    __ addptr(rsp, 4*wordSize);
  }

  __ popf();
  __ popa();
  __ bind(L);
}


static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
  int next_off = st_off - Interpreter::stackElementSize;
  __ movdbl(Address(rsp, next_off), r);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ movptr(rsi, rsp);

  __ subptr(rsp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // st_off points to lowest address on stack.
    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   12 T_LONG
    // 1    8 T_VOID
    // 2    4 T_OBJECT
    // 3    0 T_BOOL
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }

    if (r_1->is_stack()) {
      // memory to memory, use the fpu stack top
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;

      if (!r_2->is_valid()) {
        __ movl(rdi, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rdi);
      } else {

        // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
        // st_off == MSW, st_off-wordSize == LSW

        __ movptr(rdi, Address(rsp, ld_off));
        __ movptr(Address(rsp, next_off), rdi);
#ifndef _LP64
        __ movptr(rdi, Address(rsp, ld_off + wordSize));
        __ movptr(Address(rsp, st_off), rdi);
#else
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
#endif // _LP64
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ movl(Address(rsp, st_off), r);
      } else {
        // long/double in gpr
        NOT_LP64(ShouldNotReachHere());
        // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG.
        // T_DOUBLE and T_LONG use two slots in the interpreter.
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movptr(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
        assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
        move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  // And repush original return address
  __ push(rax);
  __ jmp(rcx);
}


static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
  int next_val_off = ld_off - Interpreter::stackElementSize;
  __ movdbl(r, Address(saved_sp, next_val_off));
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  // Note: rsi contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, rdi,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, rdi,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, rdi,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(rdi, rsp);

  // Cut-out for having no stack args. Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.
    // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
    // Convert 4-byte stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }

  // Align the outgoing SP
  __ andptr(rsp, -(StackAlignmentInBytes));

  // push the return address on the stack (note that pushing, rather
  // than storing it, yields the correct frame alignment for the callee)
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, rdi);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use rsi as a temp here because compiled code doesn't need rsi as an input,
      // and if we end up going thru a c2i because of a miss a reasonable value of rsi
      // will be generated.
      if (!r_2->is_valid()) {
        // __ fld_s(Address(saved_sp, ld_off));
        // __ fstp_s(Address(rsp, st_off));
        __ movl(rsi, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), rsi);
      } else {
        // Interpreter local[n] == MSW, local[n+1] == LSW, however locals
        // are accessed as negative so LSW is at the LOW address.

        // ld_off is MSW so get LSW
        // st_off is LSW (i.e. reg.first())
        // __ fld_d(Address(saved_sp, next_off));
        // __ fstp_d(Address(rsp, st_off));
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.
        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movptr(rsi, Address(saved_sp, offset));
        __ movptr(Address(rsp, st_off), rsi);
#ifndef _LP64
        __ movptr(rsi, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off + wordSize), rsi);
#endif // _LP64
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.

        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movptr(r, Address(saved_sp, offset));
#ifndef _LP64
        assert(r_2->as_Register() != rax, "need another temporary register");
        // Remember r_1 is low address (and LSB on x86)
        // So r_2 gets loaded from high address regardless of the platform
        __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
#endif // _LP64
      } else {
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ get_thread(rax);
  __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);

  // Move Method* to rax in case we end up in a c2i adapter.
  // The c2i adapters expect Method* in rax (c2) because c2's
  // resolve stubs return the result (the method) in rax.
  // I'd love to fix this.
  __ mov(rax, rbx);

  __ jmp(rdi);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
  // to the interpreter. The args start out packed in the compiled layout. They
  // need to be unpacked into the interpreter layout. This will almost always
  // require some stack space. We grow the current (compiled) stack, then repack
  // the args. We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not EBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Register holder = rax;
  Register receiver = rcx;
  Register temp = rbx;

  {

    Label missed;
    __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ jcc(Assembler::notEqual, missed);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);

    __ bind(missed);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on x86");
  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  uint stack = 0;        // All arguments on stack

  for( int i = 0; i < total_args_passed; i++) {
    // From the type and the argument number (count) compute the location
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_FLOAT:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      regs[i].set1(VMRegImpl::stack2reg(stack++));
      break;
    case T_LONG:
    case T_DOUBLE: // The stack numbering is reversed from Java
      // Since C arguments do not get reversed, the ordering for
      // doubles on the stack must be opposite the Java convention
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "missing Half" );
      regs[i].set2(VMRegImpl::stack2reg(stack));
      stack += 2;
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
  return stack;
}
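
// For illustration: for a native signature (jint, jlong, jobject) the loop
// above assigns stack slots 0, 1-2, and 3 respectively (each slot 4 bytes)
// and returns 4, since everything goes on the stack in the 32-bit C ABI.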

// A simple move of integer-like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movl2ptr(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // no need to sign extend on 64bit
    __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // Because of the calling conventions we know that src can be a
  // register or a stack location. dst can only be a stack location.

  assert(dst.first()->is_stack(), "must be stack");
  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack as an argument
    Register rHandle = rax;
    Label nil;
    __ xorptr(rHandle, rHandle);
    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, nil);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    __ bind(nil);
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);

    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
  } else {
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles.
    const Register rOop = src.first()->as_Register();
    const Register rHandle = rax;
    int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;
    Label skip;
    __ movptr(Address(rsp, offset), rOop);
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ xorptr(rHandle, rHandle);
    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip);
    __ lea(rHandle, Address(rsp, offset));
    __ bind(skip);
    // Store the handle parameter
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
    if (is_receiver) {
      *receiver_offset = offset;
    }
  }
}
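
// The effect, for illustration: the native callee receives either NULL (for
// a null oop) or a pointer to the stack slot that holds the oop, which is
// what a jobject handle is here; the slot itself is recorded in the OopMap
// so GC can update the oop across a safepoint.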

// A float arg may have to do a float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // Because of the calling convention we know that src is either a stack location
  // or an xmm register. dst can only be a stack location.

  assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");

  if (src.first()->is_stack()) {
    __ movl(rax, Address(rbp, reg2offset_in(src.first())));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
  } else {
    // reg to stack
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The only legal possibility for a long_move VMRegPair is:
  //   1: two stack slots (possibly unaligned)
  // as neither the Java nor the C calling convention will use registers
  // for longs.

  if (src.first()->is_stack() && dst.first()->is_stack()) {
    assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
    __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
    NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
  } else {
    ShouldNotReachHere();
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Because of the calling convention we know that src is either
  //   1: a single physical register (xmm registers only)
  //   2: two stack slots (possibly unaligned)
  // dst can only be a pair of stack slots.

  assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");

  if (src.first()->is_stack()) {
    // source is all stack
    __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
    NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
  } else {
    // reg to stack
    // No worries about stack alignment
    __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer,
  // which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ fstp_s(Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ fstp_d(Address(rbp, -2*wordSize));
    break;
  case T_VOID: break;
  case T_LONG:
    __ movptr(Address(rbp, -wordSize), rax);
    NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
    break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer,
  // which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ fld_s(Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ fld_d(Address(rbp, -2*wordSize));
    break;
  case T_LONG:
    __ movptr(rax, Address(rbp, -wordSize));
    NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
    break;
  case T_VOID: break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}


static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int handle_index = 0;
  // Save down double word first
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      handle_index += 2;
      assert(handle_index <= stack_slots, "overflow");
      if (map != NULL) {
        __ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_regs[i].second()->is_Register()) {
          __ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
        }
      } else {
        __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
        if (in_regs[i].second()->is_Register()) {
          __ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
        }
      }
    }
  }
  // Save or restore single word registers
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
      int offset = slot * VMRegImpl::stack_slot_size;
      assert(handle_index <= stack_slots, "overflow");
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        map->set_oop(VMRegImpl::stack2reg(slot));
      }

      // Value is in an input register; we must flush it to the stack.
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
        case T_ARRAY:
          if (map != NULL) {
            __ movptr(Address(rsp, offset), reg);
          } else {
            __ movptr(reg, Address(rsp, offset));
          }
          break;
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ movl(Address(rsp, offset), reg);
          } else {
            __ movl(reg, Address(rsp, offset));
          }
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
        int offset = slot * VMRegImpl::stack_slot_size;
        assert(handle_index <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}

// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               Register thread,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(thread, rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ push(thread);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ increment(rsp, wordSize);

  __ get_thread(thread);
  __ reset_last_Java_frame(thread, false);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
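// For illustration (a sketch; the actual C-side ordering is chosen by the
// caller via body_arg/length_arg): a critical native taking a byte[] sees
// the array as a (jint length, jbyte* body) pair, with length 0 and a NULL
// body when the Java array reference itself is null.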
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  Register tmp_reg = rax;
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair
  Label is_null, done;
  VMRegPair tmp(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    simple_move32(masm, reg, tmp);
    reg = tmp;
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);
  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  simple_move32(masm, tmp, body_arg);
  // load the length relative to the body.
  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  simple_move32(masm, tmp, length_arg);
  __ jmpb(done);
  __ bind(is_null);
  // Pass zeros
  __ xorptr(tmp_reg, tmp_reg);
  simple_move32(masm, tmp, body_arg);
  simple_move32(masm, tmp, length_arg);
  __ bind(done);
}

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = rbx;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = rbx;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", iid);
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = rcx;  // known to be free at this point
      __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
1498
1499 // ---------------------------------------------------------------------------
1500 // Generate a native wrapper for a given method. The method takes arguments
1501 // in the Java compiled code convention, marshals them to the native
1502 // convention (handlizes oops, etc), transitions to native, makes the call,
1503 // returns to java state (possibly blocking), unhandlizes any result and
1504 // returns.
1505 //
1506 // Critical native functions are a shorthand for the use of
1507 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1508 // functions.  The wrapper is expected to unpack the arguments before
1509 // passing them to the callee and perform checks before and after the
1510 // native call to ensure that the GCLocker
1511 // lock_critical/unlock_critical semantics are followed.  Some other
1512 // parts of JNI setup are skipped, like the tear-down of the JNI handle
1513 // block and the check for pending exceptions, since it's impossible for
1514 // them to be thrown.
1515 //
1516 // They are roughly structured like this:
1517 // if (GCLocker::needs_gc())
1518 // SharedRuntime::block_for_jni_critical();
1519 //   transition to thread_in_native
1520 //   unpack array arguments and call native entry point
1521 //   check for safepoint in progress
1522 //   check if any thread suspend flags are set
1523 //     call into JVM and possibly unlock the JNI critical
1524 //     if a GC was suppressed while in the critical native.
1525 // transition back to thread_in_Java
1526 // return to caller
1527 //
1528 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1529 const methodHandle& method,
1530 int compile_id,
1531 BasicType* in_sig_bt,
1532 VMRegPair* in_regs,
1533 BasicType ret_type,
1534 address critical_entry) {
1535 if (method->is_method_handle_intrinsic()) {
1536 vmIntrinsics::ID iid = method->intrinsic_id();
1537 intptr_t start = (intptr_t)__ pc();
1538 int vep_offset = ((intptr_t)__ pc()) - start;
1539 gen_special_dispatch(masm,
1540 method,
1541 in_sig_bt,
1542 in_regs);
1543 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1544 __ flush();
1545 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1546 return nmethod::new_native_nmethod(method,
1547 compile_id,
1548 masm->code(),
1549 vep_offset,
1550 frame_complete,
1551 stack_slots / VMRegImpl::slots_per_word,
1552 in_ByteSize(-1),
1553 in_ByteSize(-1),
1554 (OopMapSet*)NULL);
1555 }
1556 bool is_critical_native = true;
1557 address native_func = critical_entry;
1558 if (native_func == NULL) {
1559 native_func = method->native_function();
1560 is_critical_native = false;
1561 }
1562 assert(native_func != NULL, "must have function");
1563
1564 // An OopMap for lock (and class if static)
1565 OopMapSet *oop_maps = new OopMapSet();
1566
1567 // We have received a description of where all the java args are located
1568 // on entry to the wrapper. We need to convert these args to where
1569 // the jni function will expect them. To figure out where they go
1570 // we convert the java signature to a C signature by inserting
1571 // the hidden arguments as arg[0] and possibly arg[1] (static method)
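// An illustrative example (assumed, not from this file): for an instance
// method
//     void m(int x, Object o)
// the outgoing C signature becomes
//     (JNIEnv*, jobject receiver, jint x, jobject o)
// while for a static method the receiver slot is the class mirror instead:
//     (JNIEnv*, jclass, jint x, jobject o)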
1572
1573 const int total_in_args = method->size_of_parameters();
1574 int total_c_args = total_in_args;
1575 if (!is_critical_native) {
1576 total_c_args += 1;
1577 if (method->is_static()) {
1578 total_c_args++;
1579 }
1580 } else {
1581 for (int i = 0; i < total_in_args; i++) {
1582 if (in_sig_bt[i] == T_ARRAY) {
1583 total_c_args++;
1584 }
1585 }
1586 }
1587
1588 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1589 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1590 BasicType* in_elem_bt = NULL;
1591
1592 int argc = 0;
1593 if (!is_critical_native) {
1594 out_sig_bt[argc++] = T_ADDRESS;
1595 if (method->is_static()) {
1596 out_sig_bt[argc++] = T_OBJECT;
1597 }
1598
1599 for (int i = 0; i < total_in_args ; i++ ) {
1600 out_sig_bt[argc++] = in_sig_bt[i];
1601 }
1602 } else {
1603 Thread* THREAD = Thread::current();
1604 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1605 SignatureStream ss(method->signature());
1606 for (int i = 0; i < total_in_args ; i++ ) {
1607 if (in_sig_bt[i] == T_ARRAY) {
1608 // Arrays are passed as int, elem* pair
1609 out_sig_bt[argc++] = T_INT;
1610 out_sig_bt[argc++] = T_ADDRESS;
1611 Symbol* atype = ss.as_symbol(CHECK_NULL);
1612 const char* at = atype->as_C_string();
1613 if (strlen(at) == 2) {
1614 assert(at[0] == '[', "must be");
1615 switch (at[1]) {
1616 case 'B': in_elem_bt[i] = T_BYTE; break;
1617 case 'C': in_elem_bt[i] = T_CHAR; break;
1618 case 'D': in_elem_bt[i] = T_DOUBLE; break;
1619 case 'F': in_elem_bt[i] = T_FLOAT; break;
1620 case 'I': in_elem_bt[i] = T_INT; break;
1621 case 'J': in_elem_bt[i] = T_LONG; break;
1622 case 'S': in_elem_bt[i] = T_SHORT; break;
1623 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
1624 default: ShouldNotReachHere();
1625 }
1626 }
1627 } else {
1628 out_sig_bt[argc++] = in_sig_bt[i];
1629 in_elem_bt[i] = T_VOID;
1630 }
1631 if (in_sig_bt[i] != T_VOID) {
1632 assert(in_sig_bt[i] == ss.type(), "must match");
1633 ss.next();
1634 }
1635 }
1636 }
1637
1638 // Now figure out where the args must be stored and how much stack space
1639 // they require.
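// A rough illustration (assumed numbers, not from this file): the 32-bit
// x86 C convention passes everything on the stack in 32-bit slots, so an
// outgoing signature of (JNIEnv*, jobject, jlong) would need
// 1 + 1 + 2 = 4 out_arg_slots below.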
1640 int out_arg_slots;
1641 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1642
1643 // Compute framesize for the wrapper.  We need to handlize all oops in
1644 // registers: a max of 2 on x86.
1645
1646 // Calculate the total number of stack slots we will need.
1647
1648 // First count the abi requirement plus all of the outgoing args
1649 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1650
1651 // Now the space for the inbound oop handle area
1652 int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1653 if (is_critical_native) {
1654 // Critical natives may have to call out so they need a save area
1655 // for register arguments.
1656 int double_slots = 0;
1657 int single_slots = 0;
1658 for ( int i = 0; i < total_in_args; i++) {
1659 if (in_regs[i].first()->is_Register()) {
1660 const Register reg = in_regs[i].first()->as_Register();
1661 switch (in_sig_bt[i]) {
1662 case T_ARRAY: // critical array (uses 2 slots on LP64)
1663 case T_BOOLEAN:
1664 case T_BYTE:
1665 case T_SHORT:
1666 case T_CHAR:
1667 case T_INT: single_slots++; break;
1668 case T_LONG: double_slots++; break;
1669 default: ShouldNotReachHere();
1670 }
1671 } else if (in_regs[i].first()->is_XMMRegister()) {
1672 switch (in_sig_bt[i]) {
1673 case T_FLOAT: single_slots++; break;
1674 case T_DOUBLE: double_slots++; break;
1675 default: ShouldNotReachHere();
1676 }
1677 } else if (in_regs[i].first()->is_FloatRegister()) {
1678 ShouldNotReachHere();
1679 }
1680 }
1681 total_save_slots = double_slots * 2 + single_slots;
1682 // align the save area
1683 if (double_slots != 0) {
1684 stack_slots = align_up(stack_slots, 2);
1685 }
1686 }
1687
1688 int oop_handle_offset = stack_slots;
1689 stack_slots += total_save_slots;
1690
1691 // Now any space we need for handlizing a klass if static method
1692
1693 int klass_slot_offset = 0;
1694 int klass_offset = -1;
1695 int lock_slot_offset = 0;
1696 bool is_static = false;
1697
1698 if (method->is_static()) {
1699 klass_slot_offset = stack_slots;
1700 stack_slots += VMRegImpl::slots_per_word;
1701 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1702 is_static = true;
1703 }
1704
1705 // Plus a lock if needed
1706
1707 if (method->is_synchronized()) {
1708 lock_slot_offset = stack_slots;
1709 stack_slots += VMRegImpl::slots_per_word;
1710 }
1711
1712 // Now a place (+2) to save return values or temps during shuffling
1713 // + 2 for return address (which we own) and saved rbp
1714 stack_slots += 4;
1715
1716 // Ok The space we have allocated will look like:
1717 //
1718 //
1719 // FP-> | |
1720 // |---------------------|
1721 // | 2 slots for moves |
1722 // |---------------------|
1723 // | lock box (if sync) |
1724 // |---------------------| <- lock_slot_offset (-lock_slot_rbp_offset)
1725 // | klass (if static) |
1726 // |---------------------| <- klass_slot_offset
1727 // | oopHandle area |
1728 // |---------------------| <- oop_handle_offset (a max of 2 registers)
1729 // | outbound memory |
1730 // | based arguments |
1731 // | |
1732 // |---------------------|
1733 // | |
1734 // SP-> | out_preserved_slots |
1735 //
1736 //
1737 // ****************************************************************************
1738 // WARNING - on Windows Java Natives use pascal calling convention and pop the
1739 // arguments off of the stack after the jni call. Before the call we can use
1740 // instructions that are SP relative. After the jni call we switch to FP
1741 // relative instructions instead of re-adjusting the stack on windows.
1742 // ****************************************************************************
1743
1744
1745 // Now compute the actual number of stack words we need, rounding to make
1746 // the stack properly aligned.
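// An illustrative example (assuming StackAlignmentInBytes == 16, i.e.
// StackAlignmentInSlots == 4 with 4-byte slots): 27 slots round up to 28.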
1747 stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1748
1749 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1750
1751 intptr_t start = (intptr_t)__ pc();
1752
1753 // First thing make an ic check to see if we should even be here
1754
1755 // We are free to use all registers as temps without saving them and
1756 // restoring them except rbp. rbp is the only callee save register
1757 // as far as the interpreter and the compiler(s) are concerned.
1758
1759
1760 const Register ic_reg = rax;
1761 const Register receiver = rcx;
1762 Label hit;
1763 Label exception_pending;
1764
1765 __ verify_oop(receiver);
1766 __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
1767 __ jcc(Assembler::equal, hit);
1768
1769 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1770
1771 // verified entry must be aligned for code patching.
1772 // and the first 5 bytes must be in the same cache line
1773 // if we align at 8 then we will be sure 5 bytes are in the same line
1774 __ align(8);
1775
1776 __ bind(hit);
1777
1778 int vep_offset = ((intptr_t)__ pc()) - start;
1779
1780 #ifdef COMPILER1
1781 // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
1782 if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1783 inline_check_hashcode_from_object_header(masm, method, rcx /*obj_reg*/, rax /*result*/);
1784 }
1785 #endif // COMPILER1
1786
1787 // The instruction at the verified entry point must be 5 bytes or longer
1788 // because it can be patched on the fly by make_non_entrant. The stack bang
1789 // instruction fits that requirement.
1790
1791 // Generate stack overflow check
1792
1793 if (UseStackBanging) {
1794 __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
1795 } else {
1796 // need a 5 byte instruction to allow MT safe patching to non-entrant
1797 __ fat_nop();
1798 }
1799
1800 // Generate a new frame for the wrapper.
1801 __ enter();
1802 // -2 because return address is already present and so is saved rbp
1803 __ subptr(rsp, stack_size - 2*wordSize);
1804
1805 // Frame is now completed as far as size and linkage.
1806 int frame_complete = ((intptr_t)__ pc()) - start;
1807
1808 if (UseRTMLocking) {
1809 // Abort RTM transaction before calling JNI
1810 // because critical section will be large and will be
1811 // aborted anyway. Also nmethod could be deoptimized.
1812 __ xabort(0);
1813 }
1814
1815 // Calculate the difference between rsp and rbp. We need to know it
1816 // after the native call because on Windows Java Natives will pop
1817 // the arguments and it is painful to do rsp relative addressing
1818 // in a platform independent way. So after the call we switch to
1819 // rbp relative addressing.
1820
1821 int fp_adjustment = stack_size - 2*wordSize;
1822
1823 #ifdef COMPILER2
1824 // C2 may leave the stack dirty if not in SSE2+ mode
1825 if (UseSSE >= 2) {
1826 __ verify_FPU(0, "c2i transition should have clean FPU stack");
1827 } else {
1828 __ empty_FPU_stack();
1829 }
1830 #endif /* COMPILER2 */
1831
1832 // Compute the rbp, offset for any slots used after the jni call
1833
1834 int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1835
1836 // We use rdi as a thread pointer because it is callee save and
1837 // if we load it once it is usable thru the entire wrapper
1838 const Register thread = rdi;
1839
1840 // We use rsi as the oop handle for the receiver/klass
1841 // It is callee save so it survives the call to native
1842
1843 const Register oop_handle_reg = rsi;
1844
1845 __ get_thread(thread);
1846
1847 if (is_critical_native SHENANDOAHGC_ONLY(&& !UseShenandoahGC)) {
1848 check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
1849 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1850 }
1851
1852 //
1853 // We immediately shuffle the arguments so that for any vm call we have
1854 // to make from here on out (sync slow path, jvmti, etc.) we will have
1855 // captured the oops from our caller and have a valid oopMap for
1856 // them.
1857
1858 // -----------------
1859 // The Grand Shuffle
1860 //
1861 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1862 // and, if static, the class mirror instead of a receiver. This pretty much
1863 // guarantees that register layout will not match (and x86 doesn't use reg
1864 // parms though amd64 does).  Since the native abi doesn't use register args
1865 // and the java convention does, we don't have to worry about collisions.
1866 // All of our moves are reg->stack or stack->stack.
1867 // We ignore the extra arguments during the shuffle and handle them at the
1868 // last moment. The shuffle is described by the two calling convention
1869 // vectors we have in our possession. We simply walk the java vector to
1870 // get the source locations and the c vector to get the destinations.
1871
1872 int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
1873
1874 // Record rsp-based slot for receiver on stack for non-static methods
1875 int receiver_offset = -1;
1876
1877 // This is a trick. We double the stack slots so we can claim
1878 // the oops in the caller's frame. Since we are sure to have
1879 // more args than the caller, doubling is enough to make
1880 // sure we can capture all the incoming oop args from the
1881 // caller.
1882 //
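// An illustrative example (assumed numbers, not from this file): with
// stack_slots == 10 the map below spans 20 slots, so oop args still
// sitting in the caller's frame (slot indices beyond our own frame) can
// be recorded in the same oopMap.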
1883 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1884
1885 #if INCLUDE_SHENANDOAHGC
1886 // Inbound arguments that need to be pinned for critical natives
1887 GrowableArray<int> pinned_args(total_in_args);
1888 // Current stack slot for storing register based array argument
1889 int pinned_slot = oop_handle_offset;
1890 #endif
1891 // Mark location of rbp,
1892 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1893
1894 // We know that we only have args in at most two integer registers (rcx, rdx). So rax and rbx
1895 // are free as temporaries if we have to do stack to stack moves.
1896 // All inbound args are referenced based on rbp, and all outbound args via rsp.
1897
1898 for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
1899 switch (in_sig_bt[i]) {
1900 case T_ARRAY:
1901 if (is_critical_native) {
1902 #if INCLUDE_SHENANDOAHGC
1903 VMRegPair in_arg = in_regs[i];
1904 if (UseShenandoahGC) {
1905 // gen_pin_object handles save and restore
1906 // of any clobbered registers
1907 ShenandoahBarrierSet::assembler()->gen_pin_object(masm, thread, in_arg);
1908 pinned_args.append(i);
1909
1910 // rax has pinned array
1911 VMRegPair result_reg(rax->as_VMReg());
1912 if (!in_arg.first()->is_stack()) {
1913 assert(pinned_slot <= stack_slots, "overflow");
1914 simple_move32(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
1915 pinned_slot += VMRegImpl::slots_per_word;
1916 } else {
1917 // Write back pinned value, it will be used to unpin this argument
1918 __ movptr(Address(rbp, reg2offset_in(in_arg.first())), result_reg.first()->as_Register());
1919 }
1920 // We have the array in register, use it
1921 in_arg = result_reg;
1922 }
1923 unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1924 #else
1925 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1926 #endif
1927 c_arg++;
1928 break;
1929 }
1930 case T_OBJECT:
1931 assert(!is_critical_native, "no oop arguments");
1932 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1933 ((i == 0) && (!is_static)),
1934 &receiver_offset);
1935 break;
1936 case T_VOID:
1937 break;
1938
1939 case T_FLOAT:
1940 float_move(masm, in_regs[i], out_regs[c_arg]);
1941 break;
1942
1943 case T_DOUBLE:
1944 assert( i + 1 < total_in_args &&
1945 in_sig_bt[i + 1] == T_VOID &&
1946 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1947 double_move(masm, in_regs[i], out_regs[c_arg]);
1948 break;
1949
1950 case T_LONG :
1951 long_move(masm, in_regs[i], out_regs[c_arg]);
1952 break;
1953
1954 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1955
1956 default:
1957 simple_move32(masm, in_regs[i], out_regs[c_arg]);
1958 }
1959 }
1960
1961 // Pre-load a static method's oop into rsi. Used both by locking code and
1962 // the normal JNI call code.
1963 if (method->is_static() && !is_critical_native) {
1964
1965     // load oop into a register
1966 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
1967
1968     // Now handlize the static class mirror; it's known not-null.
1969 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1970 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1971
1972 // Now get the handle
1973 __ lea(oop_handle_reg, Address(rsp, klass_offset));
1974 // store the klass handle as second argument
1975 __ movptr(Address(rsp, wordSize), oop_handle_reg);
1976 }
1977
1978 // Change state to native (we save the return address in the thread, since it might not
1979 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1980 // points into the right code segment. It does not have to be the correct return pc.
1981 // We use the same pc/oopMap repeatedly when we call out
1982
1983 intptr_t the_pc = (intptr_t) __ pc();
1984 oop_maps->add_gc_map(the_pc - start, map);
1985
1986 __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);
1987
1988
1989 // We have all of the arguments set up at this point. We must not touch any register
1990 // argument registers at this point (if we had to save/restore them, there is no oopMap covering them).
1991
1992 {
1993 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
1994 __ mov_metadata(rax, method());
1995 __ call_VM_leaf(
1996 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1997 thread, rax);
1998 }
1999
2000 // RedefineClasses() tracing support for obsolete method entry
2001 if (log_is_enabled(Trace, redefine, class, obsolete)) {
2002 __ mov_metadata(rax, method());
2003 __ call_VM_leaf(
2004 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2005 thread, rax);
2006 }
2007
2008 // These are register definitions we need for locking/unlocking
2009 const Register swap_reg = rax; // Must use rax, for cmpxchg instruction
2010 const Register obj_reg = rcx; // Will contain the oop
2011 const Register lock_reg = rdx; // Address of compiler lock object (BasicLock)
2012
2013 Label slow_path_lock;
2014 Label lock_done;
2015
2016 // Lock a synchronized method
2017 if (method->is_synchronized()) {
2018 assert(!is_critical_native, "unhandled");
2019
2020
2021 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2022
2023 // Get the handle (the 2nd argument)
2024 __ movptr(oop_handle_reg, Address(rsp, wordSize));
2025
2026 // Get address of the box
2027
2028 __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
2029
2030 // Load the oop from the handle
2031 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2032
2033 if (UseBiasedLocking) {
2034 // Note that oop_handle_reg is trashed during this call
2035 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
2036 }
2037
2038 // Load immediate 1 into swap_reg %rax,
2039 __ movptr(swap_reg, 1);
2040
2041 // Load (object->mark() | 1) into swap_reg %rax,
2042 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2043
2044 // Save (object->mark() | 1) into BasicLock's displaced header
2045 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2046
2047 if (os::is_MP()) {
2048 __ lock();
2049 }
2050
2051 // src -> dest iff dest == rax, else rax, <- dest
2052 // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
2053 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2054 __ jcc(Assembler::equal, lock_done);
2055
2056 // Test if the oopMark is an obvious stack pointer, i.e.,
2057 // 1) (mark & 3) == 0, and
2058 // 2) rsp <= mark < mark + os::pagesize()
2059 // These 3 tests can be done by evaluating the following
2060 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2061 // assuming both stack pointer and pagesize have their
2062 // least significant 2 bits clear.
2063 // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
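    // A worked example (illustrative, assuming a 4096-byte page):
    // 3 - os::vm_page_size() == 3 - 4096 == 0xfffff003, so the andptr below
    // clears bits [2..11] of the difference and keeps all the other bits.
    // The result is zero exactly when the low two bits of (mark - rsp) are
    // clear and 0 <= mark - rsp < 4096, i.e. the displaced mark is a stack
    // address within a page of rsp (the recursive lock case).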
2064
2065 __ subptr(swap_reg, rsp);
2066 __ andptr(swap_reg, 3 - os::vm_page_size());
2067
2068 // Save the test result, for recursive case, the result is zero
2069 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2070 __ jcc(Assembler::notEqual, slow_path_lock);
2071 // Slow path will re-enter here
2072 __ bind(lock_done);
2073
2074 if (UseBiasedLocking) {
2075 // Re-fetch oop_handle_reg as we trashed it above
2076 __ movptr(oop_handle_reg, Address(rsp, wordSize));
2077 }
2078 }
2079
2080
2081 // Finally just about ready to make the JNI call
2082
2083
2084 // get JNIEnv* which is first argument to native
2085 if (!is_critical_native) {
2086 __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
2087 __ movptr(Address(rsp, 0), rdx);
2088 }
2089
2090 // Now set thread in native
2091 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
2092
2093 __ call(RuntimeAddress(native_func));
2094
2095 // Verify or restore cpu control state after JNI call
2096 __ restore_cpu_control_state_after_jni();
2097
2098 // WARNING - on Windows Java Natives use pascal calling convention and pop the
2099 // arguments off of the stack. We could just re-adjust the stack pointer here
2100 // and continue to do SP relative addressing but we instead switch to FP
2101 // relative addressing.
2102
2103 // Unpack native results.
2104 switch (ret_type) {
2105 case T_BOOLEAN: __ c2bool(rax); break;
2106 case T_CHAR : __ andptr(rax, 0xFFFF); break;
2107 case T_BYTE : __ sign_extend_byte (rax); break;
2108 case T_SHORT : __ sign_extend_short(rax); break;
2109 case T_INT : /* nothing to do */ break;
2110 case T_DOUBLE :
2111 case T_FLOAT :
2112 // Result is in st0 we'll save as needed
2113 break;
2114 case T_ARRAY: // Really a handle
2115 case T_OBJECT: // Really a handle
2116 break; // can't de-handlize until after safepoint check
2117 case T_VOID: break;
2118 case T_LONG: break;
2119 default : ShouldNotReachHere();
2120 }
2121
2122 #if INCLUDE_SHENANDOAHGC
2123 if (UseShenandoahGC) {
2124 // unpin pinned arguments
2125 pinned_slot = oop_handle_offset;
2126 if (pinned_args.length() > 0) {
2127 // save return value that may be overwritten otherwise.
2128 save_native_result(masm, ret_type, stack_slots);
2129 for (int index = 0; index < pinned_args.length(); index ++) {
2130 int i = pinned_args.at(index);
2131 assert(pinned_slot <= stack_slots, "overflow");
2132 if (!in_regs[i].first()->is_stack()) {
2133 int offset = pinned_slot * VMRegImpl::stack_slot_size;
2134 __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
2135 pinned_slot += VMRegImpl::slots_per_word;
2136 }
2137 // gen_pin_object handles save and restore
2138 // of any other clobbered registers
2139 ShenandoahBarrierSet::assembler()->gen_unpin_object(masm, thread, in_regs[i]);
2140 }
2141 restore_native_result(masm, ret_type, stack_slots);
2142 }
2143 }
2144 #endif
2145 // Switch thread to "native transition" state before reading the synchronization state.
2146 // This additional state is necessary because reading and testing the synchronization
2147 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2148 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2149 // VM thread changes sync state to synchronizing and suspends threads for GC.
2150 // Thread A is resumed to finish this native method, but doesn't block here since it
2151 //     didn't see any synchronization in progress, and escapes.
2152 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2153
2154   if (os::is_MP()) {
2155 if (UseMembar) {
2156 // Force this write out before the read below
2157 __ membar(Assembler::Membar_mask_bits(
2158 Assembler::LoadLoad | Assembler::LoadStore |
2159 Assembler::StoreLoad | Assembler::StoreStore));
2160 } else {
2161 // Write serialization page so VM thread can do a pseudo remote membar.
2162 // We use the current thread pointer to calculate a thread specific
2163 // offset to write to within the page. This minimizes bus traffic
2164 // due to cache line collision.
2165 __ serialize_memory(thread, rcx);
2166 }
2167 }
2168
2169 if (AlwaysRestoreFPU) {
2170 // Make sure the control word is correct.
2171 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
2172 }
2173
2174 Label after_transition;
2175
2176 // check for safepoint operation in progress and/or pending suspend requests
2177 { Label Continue, slow_path;
2178
2179 __ safepoint_poll(slow_path, thread, noreg);
2180
2181 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
2182 __ jcc(Assembler::equal, Continue);
2183 __ bind(slow_path);
2184
2185 // Don't use call_VM as it will see a possible pending exception and forward it
2186 // and never return here preventing us from clearing _last_native_pc down below.
2187 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2188 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2189 // by hand.
2190 //
2191 __ vzeroupper();
2192
2193 save_native_result(masm, ret_type, stack_slots);
2194 __ push(thread);
2195 if (!is_critical_native) {
2196 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2197 JavaThread::check_special_condition_for_native_trans)));
2198 } else {
2199 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2200 JavaThread::check_special_condition_for_native_trans_and_transition)));
2201 }
2202 __ increment(rsp, wordSize);
2203 // Restore any method result value
2204 restore_native_result(masm, ret_type, stack_slots);
2205
2206 if (is_critical_native) {
2207 // The call above performed the transition to thread_in_Java so
2208 // skip the transition logic below.
2209 __ jmpb(after_transition);
2210 }
2211
2212 __ bind(Continue);
2213 }
2214
2215 // change thread state
2216 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
2217 __ bind(after_transition);
2218
2219 Label reguard;
2220 Label reguard_done;
2221 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2222 __ jcc(Assembler::equal, reguard);
2223
2224 // slow path reguard re-enters here
2225 __ bind(reguard_done);
2226
2227 // Handle possible exception (will unlock if necessary)
2228
2229 // native result if any is live
2230
2231 // Unlock
2232 Label slow_path_unlock;
2233 Label unlock_done;
2234 if (method->is_synchronized()) {
2235
2236 Label done;
2237
2238 // Get locked oop from the handle we passed to jni
2239 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2240
2241 if (UseBiasedLocking) {
2242 __ biased_locking_exit(obj_reg, rbx, done);
2243 }
2244
2245 // Simple recursive lock?
2246
2247 __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
2248 __ jcc(Assembler::equal, done);
2249
2250     // Must save rax if it is live now because cmpxchg must use it
2251 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2252 save_native_result(masm, ret_type, stack_slots);
2253 }
2254
2255 // get old displaced header
2256 __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
2257
2258 // get address of the stack lock
2259 __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2260
2261 // Atomic swap old header if oop still contains the stack lock
2262 if (os::is_MP()) {
2263 __ lock();
2264 }
2265
2266 // src -> dest iff dest == rax, else rax, <- dest
2267 // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
2268 __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2269 __ jcc(Assembler::notEqual, slow_path_unlock);
2270
2271 // slow path re-enters here
2272 __ bind(unlock_done);
2273 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2274 restore_native_result(masm, ret_type, stack_slots);
2275 }
2276
2277 __ bind(done);
2278
2279 }
2280
2281 {
2282 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2283 // Tell dtrace about this method exit
2284 save_native_result(masm, ret_type, stack_slots);
2285 __ mov_metadata(rax, method());
2286 __ call_VM_leaf(
2287 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2288 thread, rax);
2289 restore_native_result(masm, ret_type, stack_slots);
2290 }
2291
2292 // We can finally stop using that last_Java_frame we setup ages ago
2293
2294 __ reset_last_Java_frame(thread, false);
2295
2296 // Unbox oop result, e.g. JNIHandles::resolve value.
2297 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2298 __ resolve_jobject(rax /* value */,
2299 thread /* thread */,
2300 rcx /* tmp */);
2301 }
2302
2303 if (CheckJNICalls) {
2304 // clear_pending_jni_exception_check
2305 __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
2306 }
2307
2308 if (!is_critical_native) {
2309 // reset handle block
2310 __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
2311 __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2312
2313 // Any exception pending?
2314 __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2315 __ jcc(Assembler::notEqual, exception_pending);
2316 }
2317
2318 // no exception, we're almost done
2319
2320 // check that only result value is on FPU stack
2321 __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
2322
2323 // Fixup floating point results so that the result looks like a return from a compiled method
2324 if (ret_type == T_FLOAT) {
2325 if (UseSSE >= 1) {
2326 // Pop st0 and store as float and reload into xmm register
2327 __ fstp_s(Address(rbp, -4));
2328 __ movflt(xmm0, Address(rbp, -4));
2329 }
2330 } else if (ret_type == T_DOUBLE) {
2331 if (UseSSE >= 2) {
2332 // Pop st0 and store as double and reload into xmm register
2333 __ fstp_d(Address(rbp, -8));
2334 __ movdbl(xmm0, Address(rbp, -8));
2335 }
2336 }
2337
2338 // Return
2339
2340 __ leave();
2341 __ ret(0);
2342
2343 // Unexpected paths are out of line and go here
2344
2345 // Slow path locking & unlocking
2346 if (method->is_synchronized()) {
2347
2348 // BEGIN Slow path lock
2349
2350 __ bind(slow_path_lock);
2351
2352 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2353 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2354 __ push(thread);
2355 __ push(lock_reg);
2356 __ push(obj_reg);
2357 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
2358 __ addptr(rsp, 3*wordSize);
2359
2360 #ifdef ASSERT
2361 { Label L;
2362 __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2363 __ jcc(Assembler::equal, L);
2364 __ stop("no pending exception allowed on exit from monitorenter");
2365 __ bind(L);
2366 }
2367 #endif
2368 __ jmp(lock_done);
2369
2370 // END Slow path lock
2371
2372 // BEGIN Slow path unlock
2373 __ bind(slow_path_unlock);
2374 __ vzeroupper();
2375 // Slow path unlock
2376
2377 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2378 save_native_result(masm, ret_type, stack_slots);
2379 }
2380 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2381
2382 __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2383 __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2384
2385
2386 // should be a peal
2387 // +wordSize because of the push above
2388 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2389 __ push(thread);
2390 __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2391 __ push(rax);
2392
2393 __ push(obj_reg);
2394 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2395 __ addptr(rsp, 3*wordSize);
2396 #ifdef ASSERT
2397 {
2398 Label L;
2399 __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2400 __ jcc(Assembler::equal, L);
2401 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2402 __ bind(L);
2403 }
2404 #endif /* ASSERT */
2405
2406 __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2407
2408 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2409 restore_native_result(masm, ret_type, stack_slots);
2410 }
2411 __ jmp(unlock_done);
2412 // END Slow path unlock
2413
2414 }
2415
2416 // SLOW PATH Reguard the stack if needed
2417
2418 __ bind(reguard);
2419 __ vzeroupper();
2420 save_native_result(masm, ret_type, stack_slots);
2421 {
2422 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2423 }
2424 restore_native_result(masm, ret_type, stack_slots);
2425 __ jmp(reguard_done);
2426
2427
2428 // BEGIN EXCEPTION PROCESSING
2429
2430 if (!is_critical_native) {
2431 // Forward the exception
2432 __ bind(exception_pending);
2433
2434 // remove possible return value from FPU register stack
2435 __ empty_FPU_stack();
2436
2437 // pop our frame
2438 __ leave();
2439 // and forward the exception
2440 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2441 }
2442
2443 __ flush();
2444
2445 nmethod *nm = nmethod::new_native_nmethod(method,
2446 compile_id,
2447 masm->code(),
2448 vep_offset,
2449 frame_complete,
2450 stack_slots / VMRegImpl::slots_per_word,
2451 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2452 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2453 oop_maps);
2454
2455 if (is_critical_native) {
2456 nm->set_lazy_critical_native(true);
2457 }
2458
2459 return nm;
2460
2461 }
2462
2463 // this function returns the adjustment size (in number of words) to a c2i adapter
2464 // activation for use during deoptimization
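// An illustrative example (assumed numbers, not from this file): a callee
// with 2 parameters and 5 locals yields (5 - 2) * Interpreter::stackElementWords
// words, the extra space the interpreter activation needs for its
// non-parameter locals.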
2465 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2466 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2467 }
2468
2469
2470 uint SharedRuntime::out_preserve_stack_slots() {
2471 return 0;
2472 }
2473
2474 //------------------------------generate_deopt_blob----------------------------
2475 void SharedRuntime::generate_deopt_blob() {
2476 // allocate space for the code
2477 ResourceMark rm;
2478 // setup code generation tools
2479 // note: the buffer code size must account for StackShadowPages=50
2480 CodeBuffer buffer("deopt_blob", 1536, 1024);
2481 MacroAssembler* masm = new MacroAssembler(&buffer);
2482 int frame_size_in_words;
2483 OopMap* map = NULL;
2484 // Account for the extra args we place on the stack
2485 // by the time we call fetch_unroll_info
2486 const int additional_words = 2; // deopt kind, thread
2487
2488 OopMapSet *oop_maps = new OopMapSet();
2489
2490 // -------------
2491 // This code enters when returning to a de-optimized nmethod. A return
2492 // address has been pushed on the stack, and return values are in
2493 // registers.
2494 // If we are doing a normal deopt then we were called from the patched
2495 // nmethod from the point we returned to the nmethod. So the return
2496 // address on the stack is wrong by NativeCall::instruction_size.
2497 // We will adjust the value so it looks like we have the original return
2498 // address on the stack (like when we eagerly deoptimized).
2499 // In the case of an exception pending when deoptimized, we enter
2500 // with a return address on the stack that points after the call we patched
2501 // into the exception handler. We have the following register state:
2502 // rax,: exception
2503 // rbx,: exception handler
2504 // rdx: throwing pc
2505 // So in this case we simply jam rdx into the useless return address and
2506 // the stack looks just like we want.
2507 //
2508 // At this point we need to de-opt. We save the argument return
2509 // registers. We call the first C routine, fetch_unroll_info(). This
2510 // routine captures the return values and returns a structure which
2511 // describes the current frame size and the sizes of all replacement frames.
2512 // The current frame is compiled code and may contain many inlined
2513 // functions, each with their own JVM state. We pop the current frame, then
2514 // push all the new frames. Then we call the C routine unpack_frames() to
2515 // populate these frames. Finally unpack_frames() returns us the new target
2516 // address. Notice that callee-save registers are BLOWN here; they have
2517 // already been captured in the vframeArray at the time the return PC was
2518 // patched.
2519 address start = __ pc();
2520 Label cont;
2521
2522 // Prolog for non exception case!
2523
2524 // Save everything in sight.
2525
2526 map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2527 // Normal deoptimization
2528 __ push(Deoptimization::Unpack_deopt);
2529 __ jmp(cont);
2530
2531 int reexecute_offset = __ pc() - start;
2532
2533 // Reexecute case
2534 // the return address is the pc that describes what bci to re-execute at
2535
2536 // No need to update map as each call to save_live_registers will produce identical oopmap
2537 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2538
2539 __ push(Deoptimization::Unpack_reexecute);
2540 __ jmp(cont);
2541
2542 int exception_offset = __ pc() - start;
2543
2544 // Prolog for exception case
2545
2546 // all registers are dead at this entry point, except for rax, and
2547 // rdx which contain the exception oop and exception pc
2548 // respectively. Set them in TLS and fall thru to the
2549 // unpack_with_exception_in_tls entry point.
2550
2551 __ get_thread(rdi);
2552 __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2553 __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2554
2555 int exception_in_tls_offset = __ pc() - start;
2556
2557 // new implementation because exception oop is now passed in JavaThread
2558
2559 // Prolog for exception case
2560 // All registers must be preserved because they might be used by LinearScan
2561 // Exception oop and throwing PC are passed in JavaThread
2562 // tos: stack at point of call to method that threw the exception (i.e. only
2563 // args are on the stack, no return address)
2564
2565 // make room on stack for the return address
2566 // It will be patched later with the throwing pc. The correct value is not
2567 // available now because loading it from memory would destroy registers.
2568 __ push(0);
2569
2570 // Save everything in sight.
2571
2572 // No need to update map as each call to save_live_registers will produce identical oopmap
2573 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2574
2575 // Now it is safe to overwrite any register
2576
2577 // store the correct deoptimization type
2578 __ push(Deoptimization::Unpack_exception);
2579
2580 // load throwing pc from JavaThread and patch it as the return address
2581 // of the current frame. Then clear the field in JavaThread
2582 __ get_thread(rdi);
2583 __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2584 __ movptr(Address(rbp, wordSize), rdx);
2585 __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2586
2587 #ifdef ASSERT
2588 // verify that there is really an exception oop in JavaThread
2589 __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2590 __ verify_oop(rax);
2591
2592 // verify that there is no pending exception
2593 Label no_pending_exception;
2594 __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2595 __ testptr(rax, rax);
2596 __ jcc(Assembler::zero, no_pending_exception);
2597 __ stop("must not have pending exception here");
2598 __ bind(no_pending_exception);
2599 #endif
2600
2601 __ bind(cont);
2602
2603 // Compiled code leaves the floating point stack dirty, empty it.
2604 __ empty_FPU_stack();
2605
2606
2607 // Call C code. Need thread and this frame, but NOT official VM entry
2608 // crud. We cannot block on this call, no GC can happen.
2609 __ get_thread(rcx);
2610 __ push(rcx);
2611 // fetch_unroll_info needs to call last_java_frame()
2612 __ set_last_Java_frame(rcx, noreg, noreg, NULL);
2613
2614 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2615
2616 // Need to have an oopmap that tells fetch_unroll_info where to
2617 // find any register it might need.
2618
2619 oop_maps->add_gc_map( __ pc()-start, map);
2620
2621 // Discard args to fetch_unroll_info
2622 __ pop(rcx);
2623 __ pop(rcx);
2624
2625 __ get_thread(rcx);
2626 __ reset_last_Java_frame(rcx, false);
2627
2628 // Load UnrollBlock into EDI
2629 __ mov(rdi, rax);
2630
2631 // Move the unpack kind to a safe place in the UnrollBlock because
2632 // we are very short of registers
2633
2634 Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
2635 // retrieve the deopt kind from the UnrollBlock.
2636 __ movl(rax, unpack_kind);
2637
2638 Label noException;
2639 __ cmpl(rax, Deoptimization::Unpack_exception); // Was exception pending?
2640 __ jcc(Assembler::notEqual, noException);
2641 __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2642 __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2643 __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2644 __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2645
2646 __ verify_oop(rax);
2647
2648 // Overwrite the result registers with the exception results.
2649 __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2650 __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2651
2652 __ bind(noException);
2653
2654 // Stack is back to only having register save data on the stack.
2655 // Now restore the result registers. Everything else is either dead or captured
2656 // in the vframeArray.
2657
2658 RegisterSaver::restore_result_registers(masm);
2659
2660 // A non-standard control word may be leaked out through a safepoint blob, and we can
2661 // deopt at a poll point with the non-standard control word. However, we should make
2662 // sure the control word is correct after restore_result_registers.
2663 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
2664
2665 // All of the register save area has been popped off the stack. Only the
2666 // return address remains.
2667
2668 // Pop all the frames we must move/replace.
2669 //
2670 // Frame picture (youngest to oldest)
2671 // 1: self-frame (no frame link)
2672 // 2: deopting frame (no frame link)
2673 // 3: caller of deopting frame (could be compiled/interpreted).
2674 //
2675 // Note: by leaving the return address of self-frame on the stack
2676 // and using the size of frame 2 to adjust the stack
2677 // when we are done the return to frame 3 will still be on the stack.
2678
2679 // Pop deoptimized frame
2680 __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2681
2682 // sp should be pointing at the return address to the caller (3)
2683
2684 // Pick up the initial fp we should save
2685 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2686 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2687
2688 #ifdef ASSERT
2689 // Compilers generate code that bangs the stack by as much as the
2690 // interpreter would need. So this stack banging should never
2691 // trigger a fault. Verify that it does not on non-product builds.
2692 if (UseStackBanging) {
2693 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2694 __ bang_stack_size(rbx, rcx);
2695 }
2696 #endif
2697
2698 // Load array of frame pcs into ECX
2699 __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2700
2701 __ pop(rsi); // trash the old pc
2702
2703 // Load array of frame sizes into ESI
2704 __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2705
2706 Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2707
2708 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2709 __ movl(counter, rbx);
2710
2711 // Now adjust the caller's stack to make up for the extra locals
2712 // but record the original sp so that we can save it in the skeletal interpreter
2713 // frame and the stack walking of interpreter_sender will get the unextended sp
2714 // value and not the "real" sp value.
2715
2716 Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2717 __ movptr(sp_temp, rsp);
2718 __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2719 __ subptr(rsp, rbx);
2720
2721 // Push interpreter frames in a loop
2722 Label loop;
2723 __ bind(loop);
2724 __ movptr(rbx, Address(rsi, 0)); // Load frame size
2725 __ subptr(rbx, 2*wordSize); // we'll push pc and rbp, by hand
2726 __ pushptr(Address(rcx, 0)); // save return address
2727 __ enter(); // save old & set new rbp,
2728 __ subptr(rsp, rbx); // Prolog!
2729 __ movptr(rbx, sp_temp); // sender's sp
2730 // This value is corrected by layout_activation_impl
2731 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
2732 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2733 __ movptr(sp_temp, rsp); // pass to next frame
2734 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
2735 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
2736 __ decrementl(counter); // decrement counter
2737 __ jcc(Assembler::notZero, loop);
2738 __ pushptr(Address(rcx, 0)); // save final return address
2739
2740 // Re-push self-frame
2741 __ enter(); // save old & set new rbp,
2742
2743 // Return address and rbp, are in place
2744 // We'll push additional args later. Just allocate a full sized
2745 // register save area
2746 __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
2747
2748 // Restore frame locals after moving the frame
2749 __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2750 __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2751 __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize)); // Pop float stack and store in local
2752 if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2753 if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2754
2755 // Set up the args to unpack_frame
2756
2757 __ pushl(unpack_kind); // get the unpack_kind value
2758 __ get_thread(rcx);
2759 __ push(rcx);
2760
2761 // set last_Java_sp, last_Java_fp
2762 __ set_last_Java_frame(rcx, noreg, rbp, NULL);
2763
2764 // Call C code. Need thread but NOT official VM entry
2765 // crud. We cannot block on this call, no GC can happen. Call should
2766 // restore return values to their stack-slots with the new SP.
2767 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2768 // Set an oopmap for the call site
2769 oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
2770
2771 // rax, contains the return result type
2772 __ push(rax);
2773
2774 __ get_thread(rcx);
2775 __ reset_last_Java_frame(rcx, false);
2776
2777 // Collect return values
2778 __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
2779 __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
2780
2781 // Clear floating point stack before returning to interpreter
2782 __ empty_FPU_stack();
2783
2784 // Check if we should push the float or double return value.
2785 Label results_done, yes_double_value;
2786 __ cmpl(Address(rsp, 0), T_DOUBLE);
2787 __ jcc (Assembler::zero, yes_double_value);
2788 __ cmpl(Address(rsp, 0), T_FLOAT);
2789 __ jcc (Assembler::notZero, results_done);
2790
2791 // return float value as expected by interpreter
2792 if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2793 else __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2794 __ jmp(results_done);
2795
2796 // return double value as expected by interpreter
2797 __ bind(yes_double_value);
2798 if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2799 else __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2800
2801 __ bind(results_done);
2802
2803 // Pop self-frame.
2804 __ leave(); // Epilog!
2805
2806 // Jump to interpreter
2807 __ ret(0);
2808
2809 // -------------
2810 // make sure all code is generated
2811 masm->flush();
2812
2813 _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2814 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2815 }
2816
2817
2818 #ifdef COMPILER2
2819 //------------------------------generate_uncommon_trap_blob--------------------
2820 void SharedRuntime::generate_uncommon_trap_blob() {
2821 // allocate space for the code
2822 ResourceMark rm;
2823 // setup code generation tools
2824 CodeBuffer buffer("uncommon_trap_blob", 512, 512);
2825 MacroAssembler* masm = new MacroAssembler(&buffer);
2826
2827 enum frame_layout {
2828 arg0_off, // thread sp + 0 // Arg location for
2829 arg1_off, // unloaded_class_index sp + 1 // calling C
2830 arg2_off, // exec_mode sp + 2
2831 // The frame sender code expects that rbp will be in the "natural" place and
2832 // will override any oopMap setting for it. We must therefore force the layout
2833 // so that it agrees with the frame sender code.
2834 rbp_off, // callee saved register sp + 3
2835 return_off, // slot for return address sp + 4
2836 framesize
2837 };
2838
2839 address start = __ pc();
2840
2841 if (UseRTMLocking) {
2842 // Abort RTM transaction before possible nmethod deoptimization.
2843 __ xabort(0);
2844 }
2845
2846 // Push self-frame.
2847 __ subptr(rsp, return_off*wordSize); // Epilog!
2848
2849 // rbp is an implicitly saved callee saved register (i.e. the calling
2850 // convention will save/restore it in prolog/epilog). Other than that
2851 // there are no callee save registers now that adapter frames are gone.
2852 __ movptr(Address(rsp, rbp_off*wordSize), rbp);
2853
2854 // Clear the floating point exception stack
2855 __ empty_FPU_stack();
2856
2857 // set last_Java_sp
2858 __ get_thread(rdx);
2859 __ set_last_Java_frame(rdx, noreg, noreg, NULL);
2860
2861 // Call C code. Need thread but NOT official VM entry
2862 // crud. We cannot block on this call, no GC can happen. Call should
2863 // capture callee-saved registers as well as return values.
2864 __ movptr(Address(rsp, arg0_off*wordSize), rdx);
2865 // argument already in ECX
2866 __ movl(Address(rsp, arg1_off*wordSize),rcx);
2867 __ movl(Address(rsp, arg2_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2868 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2869
2870 // Set an oopmap for the call site
2871 OopMapSet *oop_maps = new OopMapSet();
2872 OopMap* map = new OopMap( framesize, 0 );
2873 // No oopMap for rbp, it is known implicitly
2874
2875 oop_maps->add_gc_map( __ pc()-start, map);
2876
2877 __ get_thread(rcx);
2878
2879 __ reset_last_Java_frame(rcx, false);
2880
2881 // Load UnrollBlock into EDI
2882 __ movptr(rdi, rax);
2883
2884 #ifdef ASSERT
2885 { Label L;
2886 __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
2887 (int32_t)Deoptimization::Unpack_uncommon_trap);
2888 __ jcc(Assembler::equal, L);
2889       __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2890 __ bind(L);
2891 }
2892 #endif
2893
2894 // Pop all the frames we must move/replace.
2895 //
2896 // Frame picture (youngest to oldest)
2897 // 1: self-frame (no frame link)
2898 // 2: deopting frame (no frame link)
2899 // 3: caller of deopting frame (could be compiled/interpreted).
2900
2901 // Pop self-frame. We have no frame, and must rely only on EAX and ESP.
2902 __ addptr(rsp,(framesize-1)*wordSize); // Epilog!
2903
2904 // Pop deoptimized frame
2905 __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2906 __ addptr(rsp, rcx);
2907
2908 // sp should be pointing at the return address to the caller (3)
2909
2910 // Pick up the initial fp we should save
2911 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2912 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2913
2914 #ifdef ASSERT
2915 // Compilers generate code that bangs the stack by as much as the
2916 // interpreter would need. So this stack banging should never
2917 // trigger a fault. Verify that it does not on non-product builds.
2918 if (UseStackBanging) {
2919 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2920 __ bang_stack_size(rbx, rcx);
2921 }
2922 #endif
2923
2924 // Load array of frame pcs into ECX
2925 __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2926
2927 __ pop(rsi); // trash the pc
2928
2929 // Load array of frame sizes into ESI
2930 __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2931
2932 Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2933
2934 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2935 __ movl(counter, rbx);
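
  // Registers are scarce here (rbx, rcx, rsi and rdi are all needed by the
  // loop below), so the frame counter is kept in the UnrollBlock's scratch
  // slot instead of a register.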

  // Now adjust the caller's stack to make up for the extra locals, but
  // record the original sp so that we can store it in the skeletal
  // interpreter frame; the stack walking of interpreter_sender will then
  // get the unextended sp value and not the "real" sp value.

  Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
  __ movptr(sp_temp, rsp);
  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
  __ subptr(rsp, rbx);
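  // caller_adjustment extends the caller's frame so it can cover the locals
  // and expression-stack slots the reconstructed interpreter frames expect
  // their callers to provide.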

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ movptr(rbx, Address(rsi, 0));      // Load frame size
  __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp by hand
  __ pushptr(Address(rcx, 0));          // save return address
  __ enter();                           // save old & set new rbp
  __ subptr(rsp, rbx);                  // Prolog!
  __ movptr(rbx, sp_temp);              // sender's sp
  // This value is corrected by layout_activation_impl
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
  __ movptr(sp_temp, rsp);              // pass to next frame
  __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
  __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
  __ decrementl(counter);               // decrement counter
  __ jcc(Assembler::notZero, loop);
  __ pushptr(Address(rcx, 0));          // save final return address
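
  // Each iteration above materialized one skeletal interpreter frame: its
  // return pc, a saved rbp (via enter()), and frame-size-minus-2 words of
  // body. The pc pushed just above becomes the return address of the
  // self-frame re-established next.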

  // Re-push self-frame
  __ enter();                           // save old & set new rbp
  __ subptr(rsp, (framesize-2) * wordSize);   // Prolog!
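  // framesize-2 because the pushptr/enter() pair above already supplied the
  // return-address and saved-rbp slots of this frame.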

  // set last_Java_sp, last_Java_fp
  __ get_thread(rdi);
  __ set_last_Java_frame(rdi, noreg, rbp, NULL);

  // Call C code. Need thread but NOT official VM entry
  // crud. We cannot block on this call, and no GC can happen. The call should
  // restore return values to their stack-slots with the new SP.
  __ movptr(Address(rsp, arg0_off*wordSize), rdi);
  __ movl(Address(rsp, arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  // Set an oopmap for the call site
  oop_maps->add_gc_map(__ pc() - start, new OopMap(framesize, 0));

  __ get_thread(rdi);
  __ reset_last_Java_frame(rdi, true);

  // Pop self-frame.
  __ leave();     // Epilog!

  // Jump to interpreter
  __ ret(0);

  // -------------
  // make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
}
#endif // COMPILER2

//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// sets up an oopmap, and calls safepoint code to stop the compiled code
// at a safepoint.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {

  // Account for thread arg in our frame
  const int additional_words = 1;
  int frame_size_in_words;

  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // allocate space for the code
  // setup code generation tools
  CodeBuffer buffer("handler_blob", 1024, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  const Register java_thread = rdi; // callee-saved for VC++
  address start   = __ pc();
  address call_pc = NULL;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

  if (UseRTMLocking) {
    // Abort RTM transaction before calling runtime
    // because the critical section will be large and will be
    // aborted anyway. Also the nmethod could be deoptimized.
    __ xabort(0);
  }

  // If cause_return is true we are at a poll_return: the return address to
  // the caller is already on the stack, in the nmethod being safepointed, so
  // we can leave it in place and effectively complete the return and
  // safepoint in the caller.
  // Otherwise we push space for a return address that the safepoint
  // handler will install later to make the stack walking sensible.
  if (!cause_return)
    __ push(rbx);  // Make room for return address (or push it again)

  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);

  // The following is basically a call_VM. However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  // Push thread argument and setup last_Java_sp
  __ get_thread(java_thread);
  __ push(java_thread);
  __ set_last_Java_frame(java_thread, noreg, noreg, NULL);

  // If this was not a poll_return then we need to correct the return address now.
  if (!cause_return) {
    // Get the return pc saved by the signal handler and stash it in its
    // appropriate place on the stack.
    // Additionally, rbx is a callee-saved register, so we can look at it later
    // to determine if someone changed the return address for us!
    __ movptr(rbx, Address(java_thread, JavaThread::saved_exception_pc_offset()));
    __ movptr(Address(rbp, wordSize), rbx);
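    // save_live_registers() placed rbp in its "natural" frame position, so
    // [rbp + wordSize] is this frame's return-address slot, i.e. the slot
    // reserved with the push(rbx) above.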
  }

  // do the call
  __ call(RuntimeAddress(call_ptr));

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  oop_maps->add_gc_map(__ pc() - start, map);

  // Discard arg
  __ pop(rcx);

  Label noException;

  // Clear last_Java_sp again
  __ get_thread(java_thread);
  __ reset_last_Java_frame(java_thread, false);

  __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, noException);

  // Exception pending
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  __ bind(noException);

  Label no_adjust, bail, not_special;
  if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
    // If our stashed return pc was modified by the runtime we avoid touching it
    __ cmpptr(rbx, Address(rbp, wordSize));
    __ jccb(Assembler::notEqual, no_adjust);

    // Skip over the poll instruction.
    // See NativeInstruction::is_safepoint_poll()
    // Possible encodings:
    // 85 00       test   %eax,(%rax)
    // 85 01       test   %eax,(%rcx)
    // 85 02       test   %eax,(%rdx)
    // 85 03       test   %eax,(%rbx)
    // 85 06       test   %eax,(%rsi)
    // 85 07       test   %eax,(%rdi)
    //
    // 85 04 24    test   %eax,(%rsp)
    // 85 45 00    test   %eax,0x0(%rbp)

#ifdef ASSERT
    __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
#endif
    // rsp/rbp base encoding takes 3 bytes with the following register values:
    // rsp 0x04
    // rbp 0x05
    __ movzbl(rcx, Address(rbx, 1));
    __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
    __ subptr(rcx, 4);    // looking for 0x00 .. 0x01
    __ cmpptr(rcx, 1);
    __ jcc(Assembler::above, not_special);
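    // Note the unsigned compare: modrm low bits below 4 wrap around after the
    // subtract, so Assembler::above rejects them together with values > 5,
    // leaving only the 3-byte rsp/rbp forms to take the extra-byte path below.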
    __ addptr(rbx, 1);
    __ bind(not_special);
#ifdef ASSERT
    // Verify the correct encoding of the poll we're about to skip.
    __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
    __ jcc(Assembler::notEqual, bail);
    // Mask out the modrm bits
    __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
    // rax encodes to 0, so if the bits are nonzero it's incorrect
    __ jcc(Assembler::notZero, bail);
#endif
    // Adjust return pc forward to step over the safepoint poll instruction
    __ addptr(rbx, 2);
    __ movptr(Address(rbp, wordSize), rbx);
  }

  __ bind(no_adjust);
  // Normal exit, register restoring and exit
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ ret(0);

#ifdef ASSERT
  __ bind(bail);
  __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
#endif

  // make sure all code is generated
  masm->flush();

  // Fill-out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_words;
  enum frame_layout {
    thread_off,
    extra_words
  };
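  // thread_off is the single outgoing-argument slot (the JavaThread*), and
  // extra_words doubles as its count: it is what save_live_registers() below
  // receives as its additional_frame_words argument.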

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);

  int frame_complete = __ offset();

  const Register thread = rdi;
  __ get_thread(rdi);

  __ push(thread);
  __ set_last_Java_frame(thread, noreg, rbp, NULL);

  __ call(RuntimeAddress(destination));

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  // rax contains the address we are going to jump to, assuming no exception got installed

  __ addptr(rsp, wordSize);
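  // (this pops the thread argument pushed before the call)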

  // clear last_Java_sp
  __ reset_last_Java_frame(thread, true);
  // check for pending exceptions
  Label pending;
  __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, pending);

  // get the returned Method*
  __ get_vm_result_2(rbx, thread);
  __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);

  __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
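  // Both stores above land in the register-save area, so the
  // restore_live_registers() call below reloads rbx with the resolved
  // Method* and rax with the entry point we jump to.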

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ jmp(rax);

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ get_thread(thread);
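  // Clear vm_result so the stale oop result is not left where a later GC
  // could mistake it for a live value (presumably the Method* in vm_result_2
  // needs no such care, being metadata rather than an oop).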
  __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
  __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
