/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#if !defined(_WINDOWS) && !defined(_BSDONLY_SOURCE)
#include "alloca.h"
#endif
#ifdef _BSDONLY_SOURCE
#include <stdlib.h>
#endif
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vm_version_x86.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};

class RegisterSaver {
  // Capture info about frame layout.  Layout offsets are in jint
  // units because compiler frame slots are jints.
#define XSAVE_AREA_BEGIN 160
#define XSAVE_AREA_YMM_BEGIN 576
#define XSAVE_AREA_ZMM_BEGIN 1152
#define XSAVE_AREA_UPPERBANK 1664
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
#define DEF_YMM_OFFS(regnum) ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
#define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
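  // For illustration: DEF_XMM_OFFS(0) expands to
  //   xmm0_off = xmm_off + (0)*16/BytesPerInt, xmm0H_off
  // i.e. each register gets a named jint slot plus an H ("high half") slot
  // immediately after it, and consecutive XMM/YMM entries sit 16 bytes
  // (4 jint slots) apart.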
  enum layout {
    fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    xmm_off       = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt, // offset in fxsave save area
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    // 2..15 are implied in range usage
    ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
    DEF_YMM_OFFS(0),
    DEF_YMM_OFFS(1),
    // 2..15 are implied in range usage
    zmm_high = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
    zmm_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
    DEF_ZMM_OFFS(16),
    DEF_ZMM_OFFS(17),
    // 18..31 are implied in range usage
    fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
    r11_off, r11H_off,
    r10_off, r10H_off,
    r9_off,  r9H_off,
    r8_off,  r8H_off,
    rdi_off, rdiH_off,
    rsi_off, rsiH_off,
    ignore_off, ignoreH_off,  // extra copy of rbp
    rsp_off, rspH_off,
    rbx_off, rbxH_off,
    rdx_off, rdxH_off,
    rcx_off, rcxH_off,
    rax_off, raxH_off,
    // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
    align_off, alignH_off,
    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };

 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
  int off = 0;
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
#if COMPILER2_OR_JVMCI
  if (save_vectors) {
    assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
    assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
  }
#else
  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  // Always make the frame size 16-byte aligned, both vector and non vector stacks are always allocated
  int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume the caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address like a normal enter would leave it.

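  // Sketch of the resulting frame (high addresses at the top; a reading aid
  // only -- the layout enum above is authoritative):
  //   [ return address    ]  pushed by the caller
  //   [ saved rbp         ]  pushed by enter()
  //   [ flags, GPRs, ...  ]  pushed by push_CPU_state()
  //   [ fxsave/xsave area ]  bottom of the push_CPU_state region
  //   [ arg reg save area ]  <-- rsp after the subptr below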
  __ enter();          // rsp becomes 16-byte aligned here
  __ push_CPU_state(); // Push a multiple of 16 bytes

  // push_CPU_state handles this on EVEX enabled targets
  if (save_vectors) {
    // Save upper half of YMM registers (0..15)
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
    }
    if (VM_Version::supports_evex()) {
      // Save upper half of ZMM registers (0..15)
      base_addr = XSAVE_AREA_ZMM_BEGIN;
      for (int n = 0; n < 16; n++) {
        __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
      }
      // Save full ZMM registers (16..num_xmm_regs)
      base_addr = XSAVE_AREA_UPPERBANK;
      off = 0;
      int vector_len = Assembler::AVX_512bit;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
      }
    }
  } else {
    if (VM_Version::supports_evex()) {
      // Save upper bank of ZMM registers (16..31) for double/float usage
      int base_addr = XSAVE_AREA_UPPERBANK;
      off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
      }
    }
  }
  __ vzeroupper();
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x))

  map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code, needs no oopmap,
  // and the location where rbp was saved is ignored
  map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r8_off  ), r8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r9_off  ), r9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
  // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
  // on EVEX enabled targets we get it included in the xsave area
  off = xmm0_off;
  int delta = xmm1_off - off;
  for (int n = 0; n < 16; n++) {
    XMMRegister xmm_name = as_XMMRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
    off += delta;
  }
  if (UseAVX > 2) {
    // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
    off = zmm16_off;
    delta = zmm17_off - off;
    for (int n = 16; n < num_xmm_regs; n++) {
      XMMRegister zmm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
      off += delta;
    }
  }

#if COMPILER2_OR_JVMCI
  if (save_vectors) {
    off = ymm0_off;
    int delta = ymm1_off - off;
    for (int n = 0; n < 16; n++) {
      XMMRegister ymm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
      off += delta;
    }
  }
#endif // COMPILER2_OR_JVMCI

  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
    // rbp location is known implicitly by the frame sender code, needs no oopmap
    map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
    // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
    // on EVEX enabled targets we get it included in the xsave area
    off = xmm0H_off;
    delta = xmm1H_off - off;
    for (int n = 0; n < 16; n++) {
      XMMRegister xmm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
      off += delta;
    }
    if (UseAVX > 2) {
      // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
      off = zmm16H_off;
      delta = zmm17H_off - off;
      for (int n = 16; n < num_xmm_regs; n++) {
        XMMRegister zmm_name = as_XMMRegister(n);
        map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
        off += delta;
      }
    }
  }

  return map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
  if (frame::arg_reg_save_area_bytes != 0) {
    // Pop arg register save area
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

#if COMPILER2_OR_JVMCI
  if (restore_vectors) {
    assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
    assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2");
#endif

  __ vzeroupper();

  // On EVEX enabled targets everything is handled in pop_fpu_state
  if (restore_vectors) {
    // Restore upper half of YMM registers (0..15)
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
    }
    if (VM_Version::supports_evex()) {
      // Restore upper half of ZMM registers (0..15)
      base_addr = XSAVE_AREA_ZMM_BEGIN;
      for (int n = 0; n < 16; n++) {
        __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
      }
      // Restore full ZMM registers (16..num_xmm_regs)
      base_addr = XSAVE_AREA_UPPERBANK;
      int vector_len = Assembler::AVX_512bit;
      int off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
      }
    }
  } else {
    if (VM_Version::supports_evex()) {
      // Restore upper bank of ZMM registers (16..31) for double/float usage
      int base_addr = XSAVE_AREA_UPPERBANK;
      int off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
      }
    }
  }

  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result registers. Only used by deoptimization. By
  // now any callee save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration, so only result registers need to be restored here.

  // Restore fp result register
  __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
  // Restore integer result registers
  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));

  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_offset_in_bytes());
}

// Is vector's size (in bytes) bigger than a size saved by default?
// 16-byte XMM registers are saved by default using fxsave/fxrstor instructions.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}

size_t SharedRuntime::trampoline_size() {
  return 16;
}
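
// Note: trampoline_size() must cover whatever generate_trampoline() below
// emits.  __ jump() may encode as a short rel32 jump or as a longer absolute
// sequence depending on reachability, so 16 bytes is presumably a
// conservative upper bound rather than a tight one.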
void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ jump(RuntimeAddress(destination));
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}
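
// For illustration: incoming stack slot 0 maps to 4 * 4 = 16 bytes, i.e.
// 16(rbp) -- the first incoming stack argument, just above the two 8-byte
// words occupied by the saved rbp and the return address.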

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher.  Registers 0 up to
// RegisterImpl::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return align_up(stk_args, 2);
}
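
// Worked example (a sketch, not generated code): for a signature
// (int, long, Object, double) the loop above assigns
//   int    -> j_rarg0  (set1, int_args = 1)
//   long   -> j_rarg1  (set2, int_args = 2; its trailing T_VOID half is set_bad)
//   Object -> j_rarg2  (set2, int_args = 3)
//   double -> j_farg0  (set2, fp_args = 1; its T_VOID half is set_bad)
// so no stack slots are needed and the return value is 0.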

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);

  // Save the current stack pointer
  __ mov(r13, rsp);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax isn't live so capture return address while we easily can
  __ movptr(rax, Address(rsp, 0));

  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();
  __ vzeroupper();
  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

  // Allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  __ mov(c_rarg0, rbx);
  __ mov(c_rarg1, rax);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

  __ vzeroupper();
  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
}


static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.  Plus one word for
  // the return address location, since we store it first rather than
  // holding it in rax across all the shuffling.

  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);
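
  // For instance (assuming the usual 64-bit stackElementSize of one word):
  // 3 args give 3*8 + 8 = 32 bytes, which is already 16-byte aligned.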

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ mov(r13, rsp);

  __ subptr(rsp, extraspace);

  // Store the return address in the expected location
  __ movptr(Address(rsp, 0), rax);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a long/double
    // in a single slot on a 64-bit VM and it would be silly to break them up,
    // the interpreter leaves one slot empty and only stores to a single slot.
    // In this case the slot that is occupied is the T_VOID slot. See, I said
    // it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rax
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        // sign extend??
        __ movl(rax, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rax);

      } else {

        __ movq(rax, Address(rsp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ movq(Address(rsp, next_off), rax);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        } else {
          __ movq(Address(rsp, st_off), rax);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to slot
        // why not sign extend??
        __ movl(Address(rsp, st_off), r);
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaab));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movq(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaac));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  __ jmp(rcx);
}

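// Branches to L_ok if pc_reg lies strictly inside (code_start, code_end);
// otherwise control falls through at L_fail, where the caller is expected
// to have planted the failure path (see gen_i2c_adapter below).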
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do a i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args, as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose the alignment we expect in all compiled code, and the register
  // save code can segv when fxsave instructions find an improperly
  // aligned stack pointer.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(r11, rsp);

  // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
    // registers are below.  By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }


  // Ensure compiled code always sees stack at proper alignment
  __ andptr(rsp, -16);

  // Push the return address; this misaligns the stack exactly the way the
  // youngest frame always sees it, matching the placement of a call
  // instruction.
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, r11);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI || UseAOT) {
    // check if this call should be routed towards a specific entry point
    __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
    Label no_alternative_target;
    __ jcc(Assembler::equal, no_alternative_target);
    __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use r13 as a temp here because compiled code doesn't need r13 as an input
      // and if we end up going thru a c2i because of a miss a reasonable value of r13
      // will be generated.
      if (!r_2->is_valid()) {
        // sign extend???
        __ movl(r13, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), r13);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movq(r13, Address(saved_sp, offset));
        // st_off is LSW (i.e. reg.first())
        __ movq(Address(rsp, st_off), r13);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movq(r, Address(saved_sp, offset));
      } else {
        // sign extend and use a full word?
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

  // Put the Method* where a c2i would expect it, should we end up there.
  // This is only needed because c2's resolve stubs return the Method* as a
  // result in rax.
  __ mov(rax, rbx);
  __ jmp(r11);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not RBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  Label ok;

  Register holder = rax;
  Register receiver = j_rarg0;
  Register temp = rbx;

  {
    __ load_klass(temp, receiver);
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ jcc(Assembler::equal, ok);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on x86");
  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  // NOTE: These arrays will have to change when c1 is ported
#ifdef _WIN64
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3
  };
#else
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };
#endif // _WIN64


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        // Allocate slots for callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
#ifdef _WIN64
  // The Windows ABI requires that we always allocate enough stack space
  // for 4 64-bit registers to be stored down.
  if (stk_args < 8) {
    stk_args = 8;
  }
#endif // _WIN64

  return stk_args;
}
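
// Worked example (a sketch): for a native signature (int, double),
// the SysV path assigns int -> c_rarg0 (rdi) and double -> c_farg0 (xmm0),
// returning 0 stack slots.  On _WIN64 the registers are positional, so the
// cross-increments above yield int -> c_rarg0 (rcx) and double -> c_farg1
// (xmm1), and the home-space minimum bumps the result to 8 slots.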

// On 64-bit we will store integer-like items to the stack as
// 64-bit items (sparc abi) even though java would only store
// 32 bits for a parameter. On 32-bit it will simply be 32 bits.
// So this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
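// (JNI receives jobjects, i.e. addresses of oop slots; a NULL oop must be
// passed as a NULL handle, which the cmpptr/lea/cmovptr sequences below
// arrange without branching.)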
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is NULL; if so we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rsp, offset));
    // conditionally move a NULL from the handle area where it was just stored
    __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

// A float arg may have to do float reg int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling conventions assure us that each VMregpair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ movl(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling conventions assure us that each VMregpair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        __ mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(),  "not a stack pair");
    __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling conventions assure us that each VMregpair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(),  "not a stack pair");
    __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the frame pointer,
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(Address(rbp, -wordSize), xmm0);
    break;
  case T_DOUBLE:
    __ movdbl(Address(rbp, -wordSize), xmm0);
    break;
  case T_VOID:  break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the frame pointer,
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(xmm0, Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ movdbl(xmm0, Address(rbp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
    for ( int i = first_arg ; i < arg_count ; i++ ) {
      if (args[i].first()->is_Register()) {
        __ push(args[i].first()->as_Register());
      } else if (args[i].first()->is_XMMRegister()) {
        __ subptr(rsp, 2*wordSize);
        __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
      }
    }
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
    for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
      if (args[i].first()->is_Register()) {
        __ pop(args[i].first()->as_Register());
      } else if (args[i].first()->is_XMMRegister()) {
        __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
        __ addptr(rsp, 2*wordSize);
      }
    }
}
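
// save_args and restore_args are strict mirrors: restore_args walks the
// argument list in reverse so the pops match the pushes LIFO, and each XMM
// argument is spilled with movdbl into two words carved out with subptr.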
1357
1358
save_or_restore_arguments(MacroAssembler * masm,const int stack_slots,const int total_in_args,const int arg_save_area,OopMap * map,VMRegPair * in_regs,BasicType * in_sig_bt)1359 static void save_or_restore_arguments(MacroAssembler* masm,
1360 const int stack_slots,
1361 const int total_in_args,
1362 const int arg_save_area,
1363 OopMap* map,
1364 VMRegPair* in_regs,
1365 BasicType* in_sig_bt) {
1366 // if map is non-NULL then the code should store the values,
1367 // otherwise it should load them.
1368 int slot = arg_save_area;
1369 // Save down double word first
1370 for ( int i = 0; i < total_in_args; i++) {
1371 if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1372 int offset = slot * VMRegImpl::stack_slot_size;
1373 slot += VMRegImpl::slots_per_word;
1374 assert(slot <= stack_slots, "overflow");
1375 if (map != NULL) {
1376 __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1377 } else {
1378 __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1379 }
1380 }
1381 if (in_regs[i].first()->is_Register() &&
1382 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
1383 int offset = slot * VMRegImpl::stack_slot_size;
1384 if (map != NULL) {
1385 __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
1386 if (in_sig_bt[i] == T_ARRAY) {
1387 map->set_oop(VMRegImpl::stack2reg(slot));;
1388 }
1389 } else {
1390 __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
1391 }
1392 slot += VMRegImpl::slots_per_word;
1393 }
1394 }
1395 // Save or restore single word registers
1396 for ( int i = 0; i < total_in_args; i++) {
1397 if (in_regs[i].first()->is_Register()) {
1398 int offset = slot * VMRegImpl::stack_slot_size;
1399 slot++;
1400 assert(slot <= stack_slots, "overflow");
1401
1402 // Value is in an input register pass we must flush it to the stack
1403 const Register reg = in_regs[i].first()->as_Register();
1404 switch (in_sig_bt[i]) {
1405 case T_BOOLEAN:
1406 case T_CHAR:
1407 case T_BYTE:
1408 case T_SHORT:
1409 case T_INT:
1410 if (map != NULL) {
1411 __ movl(Address(rsp, offset), reg);
1412 } else {
1413 __ movl(reg, Address(rsp, offset));
1414 }
1415 break;
1416 case T_ARRAY:
1417 case T_LONG:
1418 // handled above
1419 break;
1420 case T_OBJECT:
1421 default: ShouldNotReachHere();
1422 }
1423 } else if (in_regs[i].first()->is_XMMRegister()) {
1424 if (in_sig_bt[i] == T_FLOAT) {
1425 int offset = slot * VMRegImpl::stack_slot_size;
1426 slot++;
1427 assert(slot <= stack_slots, "overflow");
1428 if (map != NULL) {
1429 __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1430 } else {
1431 __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1432 }
1433 }
1434 } else if (in_regs[i].first()->is_stack()) {
1435 if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1436 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1437 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1438 }
1439 }
1440 }
1441 }
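// This helper is used in pairs: first with a real OopMap so the spilled oops
// are described for GC, then with map == NULL to reload the same values
// (see check_needs_gc_for_critical_native() below).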
1442
1443 // Pin object, return pinned object or null in rax
1444 static void gen_pin_object(MacroAssembler* masm,
1445 VMRegPair reg) {
1446 __ block_comment("gen_pin_object {");
1447
1448 // rax always contains oop, either incoming or
1449 // pinned.
1450 Register tmp_reg = rax;
1451
1452 Label is_null;
1453 VMRegPair tmp;
1454 VMRegPair in_reg = reg;
1455
1456 tmp.set_ptr(tmp_reg->as_VMReg());
1457 if (reg.first()->is_stack()) {
1458 // Load the arg up from the stack
1459 move_ptr(masm, reg, tmp);
1460 reg = tmp;
1461 } else {
1462 __ movptr(rax, reg.first()->as_Register());
1463 }
1464 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1465 __ jccb(Assembler::equal, is_null);
1466
1467 if (reg.first()->as_Register() != c_rarg1) {
1468 __ movptr(c_rarg1, reg.first()->as_Register());
1469 }
1470
1471 __ call_VM_leaf(
1472 CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
1473 r15_thread, c_rarg1);
1474
1475 __ bind(is_null);
1476 __ block_comment("} gen_pin_object");
1477 }
1478
1479 // Unpin object
1480 static void gen_unpin_object(MacroAssembler* masm,
1481 VMRegPair reg) {
1482 __ block_comment("gen_unpin_object {");
1483 Label is_null;
1484
1485 if (reg.first()->is_stack()) {
1486 __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
1487 } else if (reg.first()->as_Register() != c_rarg1) {
1488 __ movptr(c_rarg1, reg.first()->as_Register());
1489 }
1490
1491 __ testptr(c_rarg1, c_rarg1);
1492 __ jccb(Assembler::equal, is_null);
1493
1494 __ call_VM_leaf(
1495 CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
1496 r15_thread, c_rarg1);
1497
1498 __ bind(is_null);
1499 __ block_comment("} gen_unpin_object");
1500 }
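// Note: any live native result must be preserved around this call, since
// call_VM_leaf may clobber rax; see the save_native_result() /
// restore_native_result() pair in the native wrapper.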
1501
1502 // Check GCLocker::needs_gc and enter the runtime if it's true. This
1503 // keeps a new JNI critical region from starting until a GC has been
1504 // forced. Save down any oops in registers and describe them in an
1505 // OopMap.
1506 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1507 int stack_slots,
1508 int total_c_args,
1509 int total_in_args,
1510 int arg_save_area,
1511 OopMapSet* oop_maps,
1512 VMRegPair* in_regs,
1513 BasicType* in_sig_bt) {
1514 __ block_comment("check GCLocker::needs_gc");
1515 Label cont;
1516 __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1517 __ jcc(Assembler::equal, cont);
1518
1519 // Save down any incoming oops and call into the runtime to halt for a GC
1520
1521 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1522 save_or_restore_arguments(masm, stack_slots, total_in_args,
1523 arg_save_area, map, in_regs, in_sig_bt);
1524
1525 address the_pc = __ pc();
1526 oop_maps->add_gc_map( __ offset(), map);
1527 __ set_last_Java_frame(rsp, noreg, the_pc);
1528
1529 __ block_comment("block_for_jni_critical");
1530 __ movptr(c_rarg0, r15_thread);
1531 __ mov(r12, rsp); // remember sp
1532 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1533 __ andptr(rsp, -16); // align stack as required by ABI
1534 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1535 __ mov(rsp, r12); // restore sp
1536 __ reinit_heapbase();
1537
1538 __ reset_last_Java_frame(false);
1539
1540 save_or_restore_arguments(masm, stack_slots, total_in_args,
1541 arg_save_area, NULL, in_regs, in_sig_bt);
1542 __ bind(cont);
1543 #ifdef ASSERT
1544 if (StressCriticalJNINatives) {
1545 // Stress register saving
1546 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1547 save_or_restore_arguments(masm, stack_slots, total_in_args,
1548 arg_save_area, map, in_regs, in_sig_bt);
1549 // Destroy argument registers
1550 for (int i = 0; i < total_in_args - 1; i++) {
1551 if (in_regs[i].first()->is_Register()) {
1552 const Register reg = in_regs[i].first()->as_Register();
1553 __ xorptr(reg, reg);
1554 } else if (in_regs[i].first()->is_XMMRegister()) {
1555 __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1556 } else if (in_regs[i].first()->is_FloatRegister()) {
1557 ShouldNotReachHere();
1558 } else if (in_regs[i].first()->is_stack()) {
1559 // Nothing to do
1560 } else {
1561 ShouldNotReachHere();
1562 }
1563 if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1564 i++;
1565 }
1566 }
1567
1568 save_or_restore_arguments(masm, stack_slots, total_in_args,
1569 arg_save_area, NULL, in_regs, in_sig_bt);
1570 }
1571 #endif
1572 }
1573
1574 // Unpack an array argument into a pointer to the body and the length
1575 // if the array is non-null, otherwise pass 0 for both.
1576 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1577 Register tmp_reg = rax;
1578 assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1579 "possible collision");
1580 assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1581 "possible collision");
1582
1583 __ block_comment("unpack_array_argument {");
1584
1585 // Pass the length, ptr pair
1586 Label is_null, done;
1587 VMRegPair tmp;
1588 tmp.set_ptr(tmp_reg->as_VMReg());
1589 if (reg.first()->is_stack()) {
1590 // Load the arg up from the stack
1591 move_ptr(masm, reg, tmp);
1592 reg = tmp;
1593 }
1594 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1595 __ jccb(Assembler::equal, is_null);
1596 __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1597 move_ptr(masm, tmp, body_arg);
1598 // load the length relative to the body.
1599 __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1600 arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1601 move32_64(masm, tmp, length_arg);
1602 __ jmpb(done);
1603 __ bind(is_null);
1604 // Pass zeros
1605 __ xorptr(tmp_reg, tmp_reg);
1606 move_ptr(masm, tmp, body_arg);
1607 move32_64(masm, tmp, length_arg);
1608 __ bind(done);
1609
1610 __ block_comment("} unpack_array_argument");
1611 }
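// So, for a non-null jbyteArray the callee receives a (jint length,
// jbyte* body) pair in place of the single array oop, while a NULL array
// arrives as (0, NULL).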
1612
1613
1614 // Different signatures may require very different orders for the move
1615 // to avoid clobbering other arguments. There's no simple way to
1616 // order them safely. Compute a safe order for issuing stores and
1617 // break any cycles in those stores. This code is fairly general but
1618 // it's not necessary on the other platforms so we keep it in the
1619 // platform dependent code instead of moving it into a shared file.
1620 // (See bugs 7013347 & 7145024.)
1621 // Note that this code is specific to LP64.
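// A minimal illustration: the moves rdi -> rsi and rsi -> rdi form a cycle.
// It is broken by first parking one source in the temp (rsi -> tmp), then
// performing the surviving move (rdi -> rsi), and finally storing the temp
// to its real destination (tmp -> rdi).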
1622 class ComputeMoveOrder: public StackObj {
1623 class MoveOperation: public ResourceObj {
1624 friend class ComputeMoveOrder;
1625 private:
1626 VMRegPair _src;
1627 VMRegPair _dst;
1628 int _src_index;
1629 int _dst_index;
1630 bool _processed;
1631 MoveOperation* _next;
1632 MoveOperation* _prev;
1633
1634 static int get_id(VMRegPair r) {
1635 return r.first()->value();
1636 }
1637
1638 public:
1639 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1640 _src(src)
1641 , _dst(dst)
1642 , _src_index(src_index)
1643 , _dst_index(dst_index)
1644 , _processed(false)
1645 , _next(NULL)
1646 , _prev(NULL) {
1647 }
1648
1649 VMRegPair src() const { return _src; }
1650 int src_id() const { return get_id(src()); }
1651 int src_index() const { return _src_index; }
1652 VMRegPair dst() const { return _dst; }
1653 void set_dst(int i, VMRegPair dst) { _dst_index = i; _dst = dst; }
1654 int dst_index() const { return _dst_index; }
1655 int dst_id() const { return get_id(dst()); }
1656 MoveOperation* next() const { return _next; }
1657 MoveOperation* prev() const { return _prev; }
1658 void set_processed() { _processed = true; }
1659 bool is_processed() const { return _processed; }
1660
1661 // Insert a new store through a temp register to break a move cycle.
1662 void break_cycle(VMRegPair temp_register) {
1663 // create a new store following the last store
1664 // to move from the temp_register to the original
1665 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1666
1667 // break the cycle of links and insert new_store at the end
1668 // break the reverse link.
1669 MoveOperation* p = prev();
1670 assert(p->next() == this, "must be");
1671 _prev = NULL;
1672 p->_next = new_store;
1673 new_store->_prev = p;
1674
1675 // change the original store to save its value in the temp.
1676 set_dst(-1, temp_register);
1677 }
1678
1679 void link(GrowableArray<MoveOperation*>& killer) {
1680 // link this store in front of the store that it depends on
1681 MoveOperation* n = killer.at_grow(src_id(), NULL);
1682 if (n != NULL) {
1683 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1684 _next = n;
1685 n->_prev = this;
1686 }
1687 }
1688 };
1689
1690 private:
1691 GrowableArray<MoveOperation*> edges;
1692
1693 public:
1694 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1695 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1696 // Move operations where the dest is the stack can all be
1697 // scheduled first since they can't interfere with the other moves.
1698 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1699 if (in_sig_bt[i] == T_ARRAY) {
1700 c_arg--;
1701 if (out_regs[c_arg].first()->is_stack() &&
1702 out_regs[c_arg + 1].first()->is_stack()) {
1703 arg_order.push(i);
1704 arg_order.push(c_arg);
1705 } else {
1706 if (out_regs[c_arg].first()->is_stack() ||
1707 in_regs[i].first() == out_regs[c_arg].first()) {
1708 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1709 } else {
1710 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1711 }
1712 }
1713 } else if (in_sig_bt[i] == T_VOID) {
1714 arg_order.push(i);
1715 arg_order.push(c_arg);
1716 } else {
1717 if (out_regs[c_arg].first()->is_stack() ||
1718 in_regs[i].first() == out_regs[c_arg].first()) {
1719 arg_order.push(i);
1720 arg_order.push(c_arg);
1721 } else {
1722 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1723 }
1724 }
1725 }
1726 // Break any cycles in the register moves and emit them in the
1727 // proper order.
1728 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1729 for (int i = 0; i < stores->length(); i++) {
1730 arg_order.push(stores->at(i)->src_index());
1731 arg_order.push(stores->at(i)->dst_index());
1732 }
1733 }
1734
1735 // Collect all the move operations
1736 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1737 if (src.first() == dst.first()) return;
1738 edges.append(new MoveOperation(src_index, src, dst_index, dst));
1739 }
1740
1741 // Walk the edges breaking cycles between moves. The result list
1742 // can be walked in order to produce the proper set of loads
1743 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1744 // Record which moves kill which values
1745 GrowableArray<MoveOperation*> killer;
1746 for (int i = 0; i < edges.length(); i++) {
1747 MoveOperation* s = edges.at(i);
1748 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1749 killer.at_put_grow(s->dst_id(), s, NULL);
1750 }
1751 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1752 "make sure temp isn't in the registers that are killed");
1753
1754 // create links between loads and stores
1755 for (int i = 0; i < edges.length(); i++) {
1756 edges.at(i)->link(killer);
1757 }
1758
1759 // at this point, all the move operations are chained together
1760 // in a doubly linked list. Processing it backwards finds
1761 // the beginning of the chain, forwards finds the end. If there's
1762 // a cycle it can be broken at any point, so pick an edge and walk
1763 // backward until the list ends or we end where we started.
1764 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1765 for (int e = 0; e < edges.length(); e++) {
1766 MoveOperation* s = edges.at(e);
1767 if (!s->is_processed()) {
1768 MoveOperation* start = s;
1769 // search for the beginning of the chain or cycle
1770 while (start->prev() != NULL && start->prev() != s) {
1771 start = start->prev();
1772 }
1773 if (start->prev() == s) {
1774 start->break_cycle(temp_register);
1775 }
1776 // walk the chain forward inserting to store list
1777 while (start != NULL) {
1778 stores->append(start);
1779 start->set_processed();
1780 start = start->next();
1781 }
1782 }
1783 }
1784 return stores;
1785 }
1786 };
1787
1788 static void verify_oop_args(MacroAssembler* masm,
1789 const methodHandle& method,
1790 const BasicType* sig_bt,
1791 const VMRegPair* regs) {
1792 Register temp_reg = rbx; // not part of any compiled calling seq
1793 if (VerifyOops) {
1794 for (int i = 0; i < method->size_of_parameters(); i++) {
1795 if (sig_bt[i] == T_OBJECT ||
1796 sig_bt[i] == T_ARRAY) {
1797 VMReg r = regs[i].first();
1798 assert(r->is_valid(), "bad oop arg");
1799 if (r->is_stack()) {
1800 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1801 __ verify_oop(temp_reg);
1802 } else {
1803 __ verify_oop(r->as_Register());
1804 }
1805 }
1806 }
1807 }
1808 }
1809
1810 static void gen_special_dispatch(MacroAssembler* masm,
1811 const methodHandle& method,
1812 const BasicType* sig_bt,
1813 const VMRegPair* regs) {
1814 verify_oop_args(masm, method, sig_bt, regs);
1815 vmIntrinsics::ID iid = method->intrinsic_id();
1816
1817 // Now write the args into the outgoing interpreter space
1818 bool has_receiver = false;
1819 Register receiver_reg = noreg;
1820 int member_arg_pos = -1;
1821 Register member_reg = noreg;
1822 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1823 if (ref_kind != 0) {
1824 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1825 member_reg = rbx; // known to be free at this point
1826 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1827 } else if (iid == vmIntrinsics::_invokeBasic) {
1828 has_receiver = true;
1829 } else {
1830 fatal("unexpected intrinsic id %d", iid);
1831 }
1832
1833 if (member_reg != noreg) {
1834 // Load the member_arg into register, if necessary.
1835 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1836 VMReg r = regs[member_arg_pos].first();
1837 if (r->is_stack()) {
1838 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1839 } else {
1840 // no data motion is needed
1841 member_reg = r->as_Register();
1842 }
1843 }
1844
1845 if (has_receiver) {
1846 // Make sure the receiver is loaded into a register.
1847 assert(method->size_of_parameters() > 0, "oob");
1848 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1849 VMReg r = regs[0].first();
1850 assert(r->is_valid(), "bad receiver arg");
1851 if (r->is_stack()) {
1852 // Porting note: This assumes that compiled calling conventions always
1853 // pass the receiver oop in a register. If this is not true on some
1854 // platform, pick a temp and load the receiver from stack.
1855 fatal("receiver always in a register");
1856 receiver_reg = j_rarg0; // known to be free at this point
1857 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1858 } else {
1859 // no data motion is needed
1860 receiver_reg = r->as_Register();
1861 }
1862 }
1863
1864 // Figure out which address we are really jumping to:
1865 MethodHandles::generate_method_handle_dispatch(masm, iid,
1866 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1867 }
1868
1869 // ---------------------------------------------------------------------------
1870 // Generate a native wrapper for a given method. The method takes arguments
1871 // in the Java compiled code convention, marshals them to the native
1872 // convention (handlizes oops, etc), transitions to native, makes the call,
1873 // returns to java state (possibly blocking), unhandlizes any result and
1874 // returns.
1875 //
1876 // Critical native functions are a shorthand for the use of
1877 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1878 // functions. The wrapper is expected to unpack the arguments before
1879 // passing them to the callee and perform checks before and after the
1880 // native call to ensure that the GCLocker
1881 // lock_critical/unlock_critical semantics are followed. Some other
1882 // parts of JNI setup are skipped, like the tear-down of the JNI handle
1883 // block and the check for pending exceptions, since it is impossible
1884 // for them to be thrown.
1885 //
1886 // They are roughly structured like this:
1887 // if (GCLocker::needs_gc())
1888 // SharedRuntime::block_for_jni_critical();
1889 // transition to thread_in_native
1890 // unpack array arguments and call native entry point
1891 // check for safepoint in progress
1892 // check if any thread suspend flags are set
1893 // call into JVM and possibly unlock the JNI critical
1894 // if a GC was suppressed while in the critical native.
1895 // transition back to thread_in_Java
1896 // return to caller
1897 //
1898 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1899 const methodHandle& method,
1900 int compile_id,
1901 BasicType* in_sig_bt,
1902 VMRegPair* in_regs,
1903 BasicType ret_type) {
1904 if (method->is_method_handle_intrinsic()) {
1905 vmIntrinsics::ID iid = method->intrinsic_id();
1906 intptr_t start = (intptr_t)__ pc();
1907 int vep_offset = ((intptr_t)__ pc()) - start;
1908 gen_special_dispatch(masm,
1909 method,
1910 in_sig_bt,
1911 in_regs);
1912 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1913 __ flush();
1914 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1915 return nmethod::new_native_nmethod(method,
1916 compile_id,
1917 masm->code(),
1918 vep_offset,
1919 frame_complete,
1920 stack_slots / VMRegImpl::slots_per_word,
1921 in_ByteSize(-1),
1922 in_ByteSize(-1),
1923 (OopMapSet*)NULL);
1924 }
1925 bool is_critical_native = true;
1926 address native_func = method->critical_native_function();
1927 if (native_func == NULL) {
1928 native_func = method->native_function();
1929 is_critical_native = false;
1930 }
1931 assert(native_func != NULL, "must have function");
1932
1933 // An OopMap for lock (and class if static)
1934 OopMapSet *oop_maps = new OopMapSet();
1935 intptr_t start = (intptr_t)__ pc();
1936
1937 // We have received a description of where all the java args are located
1938 // on entry to the wrapper. We need to convert these args to where
1939 // the jni function will expect them. To figure out where they go
1940 // we convert the java signature to a C signature by inserting
1941 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1942
1943 const int total_in_args = method->size_of_parameters();
1944 int total_c_args = total_in_args;
1945 if (!is_critical_native) {
1946 total_c_args += 1;
1947 if (method->is_static()) {
1948 total_c_args++;
1949 }
1950 } else {
1951 for (int i = 0; i < total_in_args; i++) {
1952 if (in_sig_bt[i] == T_ARRAY) {
1953 total_c_args++;
1954 }
1955 }
1956 }
1957
1958 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1959 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1960 BasicType* in_elem_bt = NULL;
1961
1962 int argc = 0;
1963 if (!is_critical_native) {
1964 out_sig_bt[argc++] = T_ADDRESS;
1965 if (method->is_static()) {
1966 out_sig_bt[argc++] = T_OBJECT;
1967 }
1968
1969 for (int i = 0; i < total_in_args ; i++ ) {
1970 out_sig_bt[argc++] = in_sig_bt[i];
1971 }
1972 } else {
1973 Thread* THREAD = Thread::current();
1974 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1975 SignatureStream ss(method->signature());
1976 for (int i = 0; i < total_in_args ; i++ ) {
1977 if (in_sig_bt[i] == T_ARRAY) {
1978 // Arrays are passed as int, elem* pair
1979 out_sig_bt[argc++] = T_INT;
1980 out_sig_bt[argc++] = T_ADDRESS;
1981 Symbol* atype = ss.as_symbol(CHECK_NULL);
1982 const char* at = atype->as_C_string();
1983 if (strlen(at) == 2) {
1984 assert(at[0] == '[', "must be");
1985 switch (at[1]) {
1986 case 'B': in_elem_bt[i] = T_BYTE; break;
1987 case 'C': in_elem_bt[i] = T_CHAR; break;
1988 case 'D': in_elem_bt[i] = T_DOUBLE; break;
1989 case 'F': in_elem_bt[i] = T_FLOAT; break;
1990 case 'I': in_elem_bt[i] = T_INT; break;
1991 case 'J': in_elem_bt[i] = T_LONG; break;
1992 case 'S': in_elem_bt[i] = T_SHORT; break;
1993 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
1994 default: ShouldNotReachHere();
1995 }
1996 }
1997 } else {
1998 out_sig_bt[argc++] = in_sig_bt[i];
1999 in_elem_bt[i] = T_VOID;
2000 }
2001 if (in_sig_bt[i] != T_VOID) {
2002 assert(in_sig_bt[i] == ss.type(), "must match");
2003 ss.next();
2004 }
2005 }
2006 }
2007
2008 // Now figure out where the args must be stored and how much stack space
2009 // they require.
2010 int out_arg_slots;
2011 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2012
2013 // Compute framesize for the wrapper. We need to handlize all oops in
2014 // incoming registers
2015
2016 // Calculate the total number of stack slots we will need.
2017
2018 // First count the abi requirement plus all of the outgoing args
2019 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2020
2021 // Now the space for the inbound oop handle area
2022 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
2023 if (is_critical_native) {
2024 // Critical natives may have to call out so they need a save area
2025 // for register arguments.
2026 int double_slots = 0;
2027 int single_slots = 0;
2028 for ( int i = 0; i < total_in_args; i++) {
2029 if (in_regs[i].first()->is_Register()) {
2030 const Register reg = in_regs[i].first()->as_Register();
2031 switch (in_sig_bt[i]) {
2032 case T_BOOLEAN:
2033 case T_BYTE:
2034 case T_SHORT:
2035 case T_CHAR:
2036 case T_INT: single_slots++; break;
2037 case T_ARRAY: // specific to LP64 (7145024)
2038 case T_LONG: double_slots++; break;
2039 default: ShouldNotReachHere();
2040 }
2041 } else if (in_regs[i].first()->is_XMMRegister()) {
2042 switch (in_sig_bt[i]) {
2043 case T_FLOAT: single_slots++; break;
2044 case T_DOUBLE: double_slots++; break;
2045 default: ShouldNotReachHere();
2046 }
2047 } else if (in_regs[i].first()->is_FloatRegister()) {
2048 ShouldNotReachHere();
2049 }
2050 }
2051 total_save_slots = double_slots * 2 + single_slots;
2052 // align the save area
2053 if (double_slots != 0) {
2054 stack_slots = align_up(stack_slots, 2);
2055 }
2056 }
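// For example, a critical native taking (jint, jlong, jfloat) in registers
// would need 1 + 2 + 1 = 4 save slots here, with stack_slots rounded up so
// the double-word values stay aligned.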
2057
2058 int oop_handle_offset = stack_slots;
2059 stack_slots += total_save_slots;
2060
2061 // Now any space we need for handlizing a klass if static method
2062
2063 int klass_slot_offset = 0;
2064 int klass_offset = -1;
2065 int lock_slot_offset = 0;
2066 bool is_static = false;
2067
2068 if (method->is_static()) {
2069 klass_slot_offset = stack_slots;
2070 stack_slots += VMRegImpl::slots_per_word;
2071 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2072 is_static = true;
2073 }
2074
2075 // Plus a lock if needed
2076
2077 if (method->is_synchronized()) {
2078 lock_slot_offset = stack_slots;
2079 stack_slots += VMRegImpl::slots_per_word;
2080 }
2081
2082 // Now a place (+2) to save return values or temp during shuffling
2083 // + 4 for return address (which we own) and saved rbp
2084 stack_slots += 6;
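// (A 64-bit word is two jint-sized slots, hence 6 = 2 slots for the
// return-value/shuffle temp + 2 for the return address + 2 for saved rbp.)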
2085
2086 // OK, the space we have allocated will look like:
2087 //
2088 //
2089 // FP-> | |
2090 // |---------------------|
2091 // | 2 slots for moves |
2092 // |---------------------|
2093 // | lock box (if sync) |
2094 // |---------------------| <- lock_slot_offset
2095 // | klass (if static) |
2096 // |---------------------| <- klass_slot_offset
2097 // | oopHandle area |
2098 // |---------------------| <- oop_handle_offset (6 java arg registers)
2099 // | outbound memory |
2100 // | based arguments |
2101 // | |
2102 // |---------------------|
2103 // | |
2104 // SP-> | out_preserved_slots |
2105 //
2106 //
2107
2108
2109 // Now compute the actual number of stack words we need, rounding to keep
2110 // the stack properly aligned.
2111 stack_slots = align_up(stack_slots, StackAlignmentInSlots);
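// For example, with 16-byte stack alignment and 4-byte slots,
// StackAlignmentInSlots is 4, so 37 slots here would round up to 40.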
2112
2113 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2114
2115 // First thing make an ic check to see if we should even be here
2116
2117 // We are free to use all registers as temps without saving them and
2118 // restoring them except rbp. rbp is the only callee save register
2119 // as far as the interpreter and the compiler(s) are concerned.
2120
2121
2122 const Register ic_reg = rax;
2123 const Register receiver = j_rarg0;
2124
2125 Label hit;
2126 Label exception_pending;
2127
2128 assert_different_registers(ic_reg, receiver, rscratch1);
2129 __ verify_oop(receiver);
2130 __ load_klass(rscratch1, receiver);
2131 __ cmpq(ic_reg, rscratch1);
2132 __ jcc(Assembler::equal, hit);
2133
2134 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2135
2136 // Verified entry point must be aligned
2137 __ align(8);
2138
2139 __ bind(hit);
2140
2141 int vep_offset = ((intptr_t)__ pc()) - start;
2142
2143 #ifdef COMPILER1
2144 // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
2145 if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2146 inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
2147 }
2148 #endif // COMPILER1
2149
2150 // The instruction at the verified entry point must be 5 bytes or longer
2151 // because it can be patched on the fly by make_non_entrant. The stack bang
2152 // instruction fits that requirement.
2153
2154 // Generate stack overflow check
2155
2156 if (UseStackBanging) {
2157 __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
2158 } else {
2159 // need a 5 byte instruction to allow MT safe patching to non-entrant
2160 __ fat_nop();
2161 }
2162
2163 // Generate a new frame for the wrapper.
2164 __ enter();
2165 // -2 because return address is already present and so is saved rbp
2166 __ subptr(rsp, stack_size - 2*wordSize);
2167
2168 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
2169 bs->nmethod_entry_barrier(masm);
2170
2171 // Frame is now completed as far as size and linkage.
2172 int frame_complete = ((intptr_t)__ pc()) - start;
2173
2174 if (UseRTMLocking) {
2175 // Abort RTM transaction before calling JNI
2176 // because critical section will be large and will be
2177 // aborted anyway. Also nmethod could be deoptimized.
2178 __ xabort(0);
2179 }
2180
2181 #ifdef ASSERT
2182 {
2183 Label L;
2184 __ mov(rax, rsp);
2185 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2186 __ cmpptr(rax, rsp);
2187 __ jcc(Assembler::equal, L);
2188 __ stop("improperly aligned stack");
2189 __ bind(L);
2190 }
2191 #endif /* ASSERT */
2192
2193
2194 // We use r14 as the oop handle for the receiver/klass
2195 // It is callee save so it survives the call to native
2196
2197 const Register oop_handle_reg = r14;
2198
2199 if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
2200 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2201 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2202 }
2203
2204 //
2205 // We immediately shuffle the arguments so that for any vm call we have
2206 // to make from here on out (sync slow path, jvmti, etc.) we will have
2207 // captured the oops from our caller and have a valid oopMap for
2208 // them.
2209
2210 // -----------------
2211 // The Grand Shuffle
2212
2213 // The Java calling convention is either equal (linux) or denser (win64) than the
2214 // c calling convention. However, because of the jni_env argument the c calling
2215 // convention always has at least one more (and two for static) arguments than Java.
2216 // Therefore if we move the args from java -> c backwards then we will never have
2217 // a register->register conflict and we don't have to build a dependency graph
2218 // and figure out how to break any cycles.
2219 //
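// (For instance, j_rarg0..j_rarg4 are defined as c_rarg1..c_rarg5, so once
// the JNIEnv* claims c_rarg0 a Java register arg is typically already
// sitting in the register its C slot expects.)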
2220
2221 // Record esp-based slot for receiver on stack for non-static methods
2222 int receiver_offset = -1;
2223
2224 // This is a trick. We double the stack slots so we can claim
2225 // the oops in the caller's frame. Since we are sure to have
2226 // more args than the caller, doubling is enough to make
2227 // sure we can capture all the incoming oop args from the
2228 // caller.
2229 //
2230 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2231
2232 // Mark location of rbp (someday)
2233 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2234
2235 // Use eax, ebx as temporaries during any memory-memory moves we have to do
2236 // All inbound args are referenced based on rbp and all outbound args via rsp.
2237
2238
2239 #ifdef ASSERT
2240 bool reg_destroyed[RegisterImpl::number_of_registers];
2241 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2242 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2243 reg_destroyed[r] = false;
2244 }
2245 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2246 freg_destroyed[f] = false;
2247 }
2248
2249 #endif /* ASSERT */
2250
2251 // This may iterate in two different directions depending on the
2252 // kind of native it is. The reason is that for regular JNI natives
2253 // the incoming and outgoing registers are offset upwards and for
2254 // critical natives they are offset down.
2255 GrowableArray<int> arg_order(2 * total_in_args);
2256 // Inbound arguments that need to be pinned for critical natives
2257 GrowableArray<int> pinned_args(total_in_args);
2258 // Current stack slot for storing register based array argument
2259 int pinned_slot = oop_handle_offset;
2260
2261 VMRegPair tmp_vmreg;
2262 tmp_vmreg.set2(rbx->as_VMReg());
2263
2264 if (!is_critical_native) {
2265 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2266 arg_order.push(i);
2267 arg_order.push(c_arg);
2268 }
2269 } else {
2270 // Compute a valid move order, using tmp_vmreg to break any cycles
2271 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2272 }
2273
2274 int temploc = -1;
2275 for (int ai = 0; ai < arg_order.length(); ai += 2) {
2276 int i = arg_order.at(ai);
2277 int c_arg = arg_order.at(ai + 1);
2278 __ block_comment(err_msg("move %d -> %d", i, c_arg));
2279 if (c_arg == -1) {
2280 assert(is_critical_native, "should only be required for critical natives");
2281 // This arg needs to be moved to a temporary
2282 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2283 in_regs[i] = tmp_vmreg;
2284 temploc = i;
2285 continue;
2286 } else if (i == -1) {
2287 assert(is_critical_native, "should only be required for critical natives");
2288 // Read from the temporary location
2289 assert(temploc != -1, "must be valid");
2290 i = temploc;
2291 temploc = -1;
2292 }
2293 #ifdef ASSERT
2294 if (in_regs[i].first()->is_Register()) {
2295 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2296 } else if (in_regs[i].first()->is_XMMRegister()) {
2297 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2298 }
2299 if (out_regs[c_arg].first()->is_Register()) {
2300 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2301 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2302 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2303 }
2304 #endif /* ASSERT */
2305 switch (in_sig_bt[i]) {
2306 case T_ARRAY:
2307 if (is_critical_native) {
2308 // pin before unpack
2309 if (Universe::heap()->supports_object_pinning()) {
2310 save_args(masm, total_c_args, 0, out_regs);
2311 gen_pin_object(masm, in_regs[i]);
2312 pinned_args.append(i);
2313 restore_args(masm, total_c_args, 0, out_regs);
2314
2315 // rax has pinned array
2316 VMRegPair result_reg;
2317 result_reg.set_ptr(rax->as_VMReg());
2318 move_ptr(masm, result_reg, in_regs[i]);
2319 if (!in_regs[i].first()->is_stack()) {
2320 assert(pinned_slot <= stack_slots, "overflow");
2321 move_ptr(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
2322 pinned_slot += VMRegImpl::slots_per_word;
2323 }
2324 }
2325 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2326 c_arg++;
2327 #ifdef ASSERT
2328 if (out_regs[c_arg].first()->is_Register()) {
2329 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2330 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2331 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2332 }
2333 #endif
2334 break;
2335 }
2336 case T_OBJECT:
2337 assert(!is_critical_native, "no oop arguments");
2338 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2339 ((i == 0) && (!is_static)),
2340 &receiver_offset);
2341 break;
2342 case T_VOID:
2343 break;
2344
2345 case T_FLOAT:
2346 float_move(masm, in_regs[i], out_regs[c_arg]);
2347 break;
2348
2349 case T_DOUBLE:
2350 assert( i + 1 < total_in_args &&
2351 in_sig_bt[i + 1] == T_VOID &&
2352 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2353 double_move(masm, in_regs[i], out_regs[c_arg]);
2354 break;
2355
2356 case T_LONG :
2357 long_move(masm, in_regs[i], out_regs[c_arg]);
2358 break;
2359
2360 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2361
2362 default:
2363 move32_64(masm, in_regs[i], out_regs[c_arg]);
2364 }
2365 }
2366
2367 int c_arg;
2368
2369 // Pre-load a static method's oop into r14. Used both by locking code and
2370 // the normal JNI call code.
2371 if (!is_critical_native) {
2372 // point c_arg at the first arg that is already loaded in case we
2373 // need to spill before we call out
2374 c_arg = total_c_args - total_in_args;
2375
2376 if (method->is_static()) {
2377
2378 // load oop into a register
2379 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2380
2381 // Now handlize the static class mirror; it's known not-null.
2382 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2383 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2384
2385 // Now get the handle
2386 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2387 // store the klass handle as second argument
2388 __ movptr(c_rarg1, oop_handle_reg);
2389 // and protect the arg if we must spill
2390 c_arg--;
2391 }
2392 } else {
2393 // For JNI critical methods we need to save all registers in save_args.
2394 c_arg = 0;
2395 }
2396
2397 // Change state to native (we save the return address in the thread, since it might not
2398 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2399 // points into the right code segment. It does not have to be the correct return pc.
2400 // We use the same pc/oopMap repeatedly when we call out
2401
2402 intptr_t the_pc = (intptr_t) __ pc();
2403 oop_maps->add_gc_map(the_pc - start, map);
2404
2405 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2406
2407
2408 // We have all of the arguments set up at this point. We must not touch any argument
2409 // registers from here on (oops they hold would not be described in any oopMap).
2410
2411 {
2412 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2413 // protect the args we've loaded
2414 save_args(masm, total_c_args, c_arg, out_regs);
2415 __ mov_metadata(c_rarg1, method());
2416 __ call_VM_leaf(
2417 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2418 r15_thread, c_rarg1);
2419 restore_args(masm, total_c_args, c_arg, out_regs);
2420 }
2421
2422 // RedefineClasses() tracing support for obsolete method entry
2423 if (log_is_enabled(Trace, redefine, class, obsolete)) {
2424 // protect the args we've loaded
2425 save_args(masm, total_c_args, c_arg, out_regs);
2426 __ mov_metadata(c_rarg1, method());
2427 __ call_VM_leaf(
2428 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2429 r15_thread, c_rarg1);
2430 restore_args(masm, total_c_args, c_arg, out_regs);
2431 }
2432
2433 // Lock a synchronized method
2434
2435 // Register definitions used by locking and unlocking
2436
2437 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2438 const Register obj_reg = rbx; // Will contain the oop
2439 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2440 const Register old_hdr = r13; // value of old header at unlock time
2441
2442 Label slow_path_lock;
2443 Label lock_done;
2444
2445 if (method->is_synchronized()) {
2446 assert(!is_critical_native, "unhandled");
2447
2448
2449 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2450
2451 // Get the handle (the 2nd argument)
2452 __ mov(oop_handle_reg, c_rarg1);
2453
2454 // Get address of the box
2455
2456 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2457
2458 // Load the oop from the handle
2459 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2460
2461 __ resolve(IS_NOT_NULL, obj_reg);
2462 if (UseBiasedLocking) {
2463 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2464 }
2465
2466 // Load immediate 1 into swap_reg %rax
2467 __ movl(swap_reg, 1);
2468
2469 // Load (object->mark() | 1) into swap_reg %rax
2470 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2471
2472 // Save (object->mark() | 1) into BasicLock's displaced header
2473 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2474
2475 // src -> dest iff dest == rax else rax <- dest
2476 __ lock();
2477 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2478 __ jcc(Assembler::equal, lock_done);
2479
2480 // Hmm should this move to the slow path code area???
2481
2482 // Test if the oopMark is an obvious stack pointer, i.e.,
2483 // 1) (mark & 3) == 0, and
2484 // 2) rsp <= mark < mark + os::pagesize()
2485 // These 3 tests can be done by evaluating the following
2486 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2487 // assuming both stack pointer and pagesize have their
2488 // least significant 2 bits clear.
2489 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
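// For example, with a 4K page the mask 3 - os::vm_page_size() is
// 0x...f003, so the andptr below clears bits [11:2]; the result is zero
// exactly when the displaced mark is 4-byte aligned and lies within one
// page above rsp, i.e. when it points at our own stack lock (recursion).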
2490
2491 __ subptr(swap_reg, rsp);
2492 __ andptr(swap_reg, 3 - os::vm_page_size());
2493
2494 // Save the test result, for recursive case, the result is zero
2495 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2496 __ jcc(Assembler::notEqual, slow_path_lock);
2497
2498 // Slow path will re-enter here
2499
2500 __ bind(lock_done);
2501 }
2502
2503
2504 // Finally just about ready to make the JNI call
2505
2506
2507 // get JNIEnv* which is first argument to native
2508 if (!is_critical_native) {
2509 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2510 }
2511
2512 // Now set thread in native
2513 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2514
2515 __ call(RuntimeAddress(native_func));
2516
2517 // Verify or restore cpu control state after JNI call
2518 __ restore_cpu_control_state_after_jni();
2519
2520 // Unpack native results.
2521 switch (ret_type) {
2522 case T_BOOLEAN: __ c2bool(rax); break;
2523 case T_CHAR : __ movzwl(rax, rax); break;
2524 case T_BYTE : __ sign_extend_byte (rax); break;
2525 case T_SHORT : __ sign_extend_short(rax); break;
2526 case T_INT : /* nothing to do */ break;
2527 case T_DOUBLE :
2528 case T_FLOAT :
2529 // Result is in xmm0 we'll save as needed
2530 break;
2531 case T_ARRAY: // Really a handle
2532 case T_OBJECT: // Really a handle
2533 break; // can't de-handlize until after safepoint check
2534 case T_VOID: break;
2535 case T_LONG: break;
2536 default : ShouldNotReachHere();
2537 }
2538
2539 // unpin pinned arguments
2540 pinned_slot = oop_handle_offset;
2541 if (pinned_args.length() > 0) {
2542 // save return value that may be overwritten otherwise.
2543 save_native_result(masm, ret_type, stack_slots);
2544 for (int index = 0; index < pinned_args.length(); index ++) {
2545 int i = pinned_args.at(index);
2546 assert(pinned_slot <= stack_slots, "overflow");
2547 if (!in_regs[i].first()->is_stack()) {
2548 int offset = pinned_slot * VMRegImpl::stack_slot_size;
2549 __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
2550 pinned_slot += VMRegImpl::slots_per_word;
2551 }
2552 gen_unpin_object(masm, in_regs[i]);
2553 }
2554 restore_native_result(masm, ret_type, stack_slots);
2555 }
2556
2557 // Switch thread to "native transition" state before reading the synchronization state.
2558 // This additional state is necessary because reading and testing the synchronization
2559 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2560 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2561 // VM thread changes sync state to synchronizing and suspends threads for GC.
2562 // Thread A is resumed to finish this native method, but doesn't block here since it
2563 // didn't see any synchronization in progress, and escapes.
2564 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2565
2566 // Force this write out before the read below
2567 __ membar(Assembler::Membar_mask_bits(
2568 Assembler::LoadLoad | Assembler::LoadStore |
2569 Assembler::StoreLoad | Assembler::StoreStore));
2570
2571 Label after_transition;
2572
2573 // check for safepoint operation in progress and/or pending suspend requests
2574 {
2575 Label Continue;
2576 Label slow_path;
2577
2578 __ safepoint_poll(slow_path, r15_thread, rscratch1);
2579
2580 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2581 __ jcc(Assembler::equal, Continue);
2582 __ bind(slow_path);
2583
2584 // Don't use call_VM as it will see a possible pending exception and forward it
2585 // and never return here preventing us from clearing _last_native_pc down below.
2586 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2587 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2588 // by hand.
2589 //
2590 __ vzeroupper();
2591 save_native_result(masm, ret_type, stack_slots);
2592 __ mov(c_rarg0, r15_thread);
2593 __ mov(r12, rsp); // remember sp
2594 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2595 __ andptr(rsp, -16); // align stack as required by ABI
2596 if (!is_critical_native) {
2597 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2598 } else {
2599 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2600 }
2601 __ mov(rsp, r12); // restore sp
2602 __ reinit_heapbase();
2603 // Restore any method result value
2604 restore_native_result(masm, ret_type, stack_slots);
2605
2606 if (is_critical_native) {
2607 // The call above performed the transition to thread_in_Java so
2608 // skip the transition logic below.
2609 __ jmpb(after_transition);
2610 }
2611
2612 __ bind(Continue);
2613 }
2614
2615 // change thread state
2616 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2617 __ bind(after_transition);
2618
2619 Label reguard;
2620 Label reguard_done;
2621 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2622 __ jcc(Assembler::equal, reguard);
2623 __ bind(reguard_done);
2624
2625 // native result if any is live
2626
2627 // Unlock
2628 Label unlock_done;
2629 Label slow_path_unlock;
2630 if (method->is_synchronized()) {
2631
2632 // Get locked oop from the handle we passed to jni
2633 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2634 __ resolve(IS_NOT_NULL, obj_reg);
2635
2636 Label done;
2637
2638 if (UseBiasedLocking) {
2639 __ biased_locking_exit(obj_reg, old_hdr, done);
2640 }
2641
2642 // Simple recursive lock?
2643
2644 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2645 __ jcc(Assembler::equal, done);
2646
2647 // Must save rax if it is live now because cmpxchg must use it
2648 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2649 save_native_result(masm, ret_type, stack_slots);
2650 }
2651
2652
2653 // get address of the stack lock
2654 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2655 // get old displaced header
2656 __ movptr(old_hdr, Address(rax, 0));
2657
2658 // Atomic swap old header if oop still contains the stack lock
2659 __ lock();
2660 __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2661 __ jcc(Assembler::notEqual, slow_path_unlock);
2662
2663 // slow path re-enters here
2664 __ bind(unlock_done);
2665 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2666 restore_native_result(masm, ret_type, stack_slots);
2667 }
2668
2669 __ bind(done);
2670
2671 }
2672 {
2673 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2674 save_native_result(masm, ret_type, stack_slots);
2675 __ mov_metadata(c_rarg1, method());
2676 __ call_VM_leaf(
2677 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2678 r15_thread, c_rarg1);
2679 restore_native_result(masm, ret_type, stack_slots);
2680 }
2681
2682 __ reset_last_Java_frame(false);
2683
2684 // Unbox oop result, e.g. JNIHandles::resolve value.
2685 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2686 __ resolve_jobject(rax /* value */,
2687 r15_thread /* thread */,
2688 rcx /* tmp */);
2689 }
2690
2691 if (CheckJNICalls) {
2692 // clear_pending_jni_exception_check
2693 __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
2694 }
2695
2696 if (!is_critical_native) {
2697 // reset handle block
2698 __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2699 __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2700 }
2701
2702 // pop our frame
2703
2704 __ leave();
2705
2706 if (!is_critical_native) {
2707 // Any exception pending?
2708 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2709 __ jcc(Assembler::notEqual, exception_pending);
2710 }
2711
2712 // Return
2713
2714 __ ret(0);
2715
2716 // Unexpected paths are out of line and go here
2717
2718 if (!is_critical_native) {
2719 // forward the exception
2720 __ bind(exception_pending);
2721
2722 // and forward the exception
2723 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2724 }
2725
2726 // Slow path locking & unlocking
2727 if (method->is_synchronized()) {
2728
2729 // BEGIN Slow path lock
2730 __ bind(slow_path_lock);
2731
2732 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2733 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2734
2735 // protect the args we've loaded
2736 save_args(masm, total_c_args, c_arg, out_regs);
2737
2738 __ mov(c_rarg0, obj_reg);
2739 __ mov(c_rarg1, lock_reg);
2740 __ mov(c_rarg2, r15_thread);
2741
2742 // Not a leaf but we have last_Java_frame setup as we want
2743 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2744 restore_args(masm, total_c_args, c_arg, out_regs);
2745
2746 #ifdef ASSERT
2747 { Label L;
2748 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2749 __ jcc(Assembler::equal, L);
2750 __ stop("no pending exception allowed on exit from monitorenter");
2751 __ bind(L);
2752 }
2753 #endif
2754 __ jmp(lock_done);
2755
2756 // END Slow path lock
2757
2758 // BEGIN Slow path unlock
2759 __ bind(slow_path_unlock);
2760
2761 // If we haven't already saved the native result we must save it now as xmm registers
2762 // are still exposed.
2763 __ vzeroupper();
2764 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2765 save_native_result(masm, ret_type, stack_slots);
2766 }
2767
2768 __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2769
2770 __ mov(c_rarg0, obj_reg);
2771 __ mov(c_rarg2, r15_thread);
2772 __ mov(r12, rsp); // remember sp
2773 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2774 __ andptr(rsp, -16); // align stack as required by ABI
2775
2776 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2777 // NOTE that obj_reg == rbx currently
2778 __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2779 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2780
2781 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2782 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2783 __ mov(rsp, r12); // restore sp
2784 __ reinit_heapbase();
2785 #ifdef ASSERT
2786 {
2787 Label L;
2788 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2789 __ jcc(Assembler::equal, L);
2790 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2791 __ bind(L);
2792 }
2793 #endif /* ASSERT */
2794
2795 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2796
2797 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2798 restore_native_result(masm, ret_type, stack_slots);
2799 }
2800 __ jmp(unlock_done);
2801
2802 // END Slow path unlock
2803
2804 } // synchronized
2805
2806 // SLOW PATH Reguard the stack if needed
2807
2808 __ bind(reguard);
2809 __ vzeroupper();
2810 save_native_result(masm, ret_type, stack_slots);
2811 __ mov(r12, rsp); // remember sp
2812 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2813 __ andptr(rsp, -16); // align stack as required by ABI
2814 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2815 __ mov(rsp, r12); // restore sp
2816 __ reinit_heapbase();
2817 restore_native_result(masm, ret_type, stack_slots);
2818 // and continue
2819 __ jmp(reguard_done);
2820
2821
2822
2823 __ flush();
2824
2825 nmethod *nm = nmethod::new_native_nmethod(method,
2826 compile_id,
2827 masm->code(),
2828 vep_offset,
2829 frame_complete,
2830 stack_slots / VMRegImpl::slots_per_word,
2831 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2832 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2833 oop_maps);
2834
2835 if (is_critical_native) {
2836 nm->set_lazy_critical_native(true);
2837 }
2838
2839 return nm;
2840
2841 }
2842
2843 // this function returns the adjustment size (in number of words) to a c2i adapter
2844 // activation for use during deoptimization
2845 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2846 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2847 }
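// For example, a callee with 2 parameters and 5 locals grows the activation
// by (5 - 2) * Interpreter::stackElementWords words.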
2848
2849
2850 uint SharedRuntime::out_preserve_stack_slots() {
2851 return 0;
2852 }
2853
2854 //------------------------------generate_deopt_blob----------------------------
2855 void SharedRuntime::generate_deopt_blob() {
2856 // Allocate space for the code
2857 ResourceMark rm;
2858 // Setup code generation tools
2859 int pad = 0;
2860 #if INCLUDE_JVMCI
2861 if (EnableJVMCI || UseAOT) {
2862 pad += 512; // Increase the buffer size when compiling for JVMCI
2863 }
2864 #endif
2865 CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2866 MacroAssembler* masm = new MacroAssembler(&buffer);
2867 int frame_size_in_words;
2868 OopMap* map = NULL;
2869 OopMapSet *oop_maps = new OopMapSet();
2870
2871 // -------------
2872 // This code enters when returning to a de-optimized nmethod. A return
2873 // address has been pushed on the stack, and return values are in
2874 // registers.
2875 // If we are doing a normal deopt then we were called from the patched
2876 // nmethod from the point we returned to the nmethod. So the return
2877 // address on the stack is wrong by NativeCall::instruction_size
2878 // We will adjust the value so it looks like we have the original return
2879 // address on the stack (like when we eagerly deoptimized).
2880 // In the case of an exception pending when deoptimizing, we enter
2881 // with a return address on the stack that points after the call we patched
2882 // into the exception handler. We have the following register state from,
2883 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2884 // rax: exception oop
2885 // rbx: exception handler
2886 // rdx: throwing pc
2887 // So in this case we simply jam rdx into the useless return address and
2888 // the stack looks just like we want.
2889 //
2890 // At this point we need to de-opt. We save the argument return
2891 // registers. We call the first C routine, fetch_unroll_info(). This
2892 // routine captures the return values and returns a structure which
2893 // describes the current frame size and the sizes of all replacement frames.
2894 // The current frame is compiled code and may contain many inlined
2895 // functions, each with their own JVM state. We pop the current frame, then
2896 // push all the new frames. Then we call the C routine unpack_frames() to
2897 // populate these frames. Finally unpack_frames() returns us the new target
2898 // address. Notice that callee-save registers are BLOWN here; they have
2899 // already been captured in the vframeArray at the time the return PC was
2900 // patched.
2901 address start = __ pc();
2902 Label cont;
2903
2904 // Prolog for non exception case!
2905
2906 // Save everything in sight.
2907 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2908
2909 // Normal deoptimization. Save exec mode for unpack_frames.
2910 __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
2911 __ jmp(cont);
2912
2913 int reexecute_offset = __ pc() - start;
2914 #if INCLUDE_JVMCI && !defined(COMPILER1)
2915 if (EnableJVMCI && UseJVMCICompiler) {
2916 // JVMCI does not use this kind of deoptimization
2917 __ should_not_reach_here();
2918 }
2919 #endif
2920
2921 // Reexecute case
2922 // the return address is the pc that describes what bci to re-execute at
2923
2924 // No need to update map as each call to save_live_registers will produce identical oopmap
2925 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2926
2927 __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
2928 __ jmp(cont);
2929
2930 #if INCLUDE_JVMCI
2931 Label after_fetch_unroll_info_call;
2932 int implicit_exception_uncommon_trap_offset = 0;
2933 int uncommon_trap_offset = 0;
2934
2935 if (EnableJVMCI || UseAOT) {
2936 implicit_exception_uncommon_trap_offset = __ pc() - start;
2937
2938 __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2939 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD);
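// The implicit-exception pc saved by the signal handler now sits where a
// return address would, so the deoptee frame looks as if the faulting
// instruction had been a call; the field has been cleared so it cannot be
// reused by accident.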
2940
2941 uncommon_trap_offset = __ pc() - start;
2942
2943 // Save everything in sight.
2944 RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2945 // uncommon_trap needs to call last_java_frame()
2946 __ set_last_Java_frame(noreg, noreg, NULL);
2947
2948 __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
2949 __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
2950
2951 __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
2952 __ mov(c_rarg0, r15_thread);
2953 __ movl(c_rarg2, r14); // exec mode
2954 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2955 oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2956
2957 __ reset_last_Java_frame(false);
2958
2959 __ jmp(after_fetch_unroll_info_call);
2960 } // EnableJVMCI
2961 #endif // INCLUDE_JVMCI
2962
2963 int exception_offset = __ pc() - start;
2964
2965 // Prolog for exception case
2966
2967 // all registers are dead at this entry point, except for rax and
2968 // rdx, which contain the exception oop and exception pc
2969 // respectively. Set them in TLS and fall thru to the
2970 // unpack_with_exception_in_tls entry point.
2971
2972 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
2973 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
2974
2975 int exception_in_tls_offset = __ pc() - start;
2976
2977 // new implementation because exception oop is now passed in JavaThread
2978
2979 // Prolog for exception case
2980 // All registers must be preserved because they might be used by LinearScan
2981 // Exception oop and throwing PC are passed in JavaThread
2982 // tos: stack at point of call to method that threw the exception (i.e. only
2983 // args are on the stack, no return address)
2984
2985 // make room on stack for the return address
2986 // It will be patched later with the throwing pc. The correct value is not
2987 // available now because loading it from memory would destroy registers.
2988 __ push(0);
2989
2990 // Save everything in sight.
2991 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2992
2993 // Now it is safe to overwrite any register
2994
2995 // Deopt during an exception. Save exec mode for unpack_frames.
2996 __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
2997
2998 // load throwing pc from JavaThread and patch it as the return address
2999 // of the current frame. Then clear the field in JavaThread
3000
3001 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3002 __ movptr(Address(rbp, wordSize), rdx);
3003 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
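// The frame now looks as if the call at the throwing pc had just returned:
// Address(rbp, wordSize) is the return-address slot reserved by push(0) above.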
3004
3005 #ifdef ASSERT
3006 // verify that there is really an exception oop in JavaThread
3007 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3008 __ verify_oop(rax);
3009
3010 // verify that there is no pending exception
3011 Label no_pending_exception;
3012 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3013 __ testptr(rax, rax);
3014 __ jcc(Assembler::zero, no_pending_exception);
3015 __ stop("must not have pending exception here");
3016 __ bind(no_pending_exception);
3017 #endif
3018
3019 __ bind(cont);
3020
3021 // Call C code. Need thread and this frame, but NOT official VM entry
3022 // crud. We cannot block on this call, no GC can happen.
3023 //
3024 // UnrollBlock* fetch_unroll_info(JavaThread* thread, int exec_mode)
3025
3026 // fetch_unroll_info needs to call last_java_frame().
3027
3028 __ set_last_Java_frame(noreg, noreg, NULL);
3029 #ifdef ASSERT
3030 { Label L;
3031 __ cmpptr(Address(r15_thread,
3032 JavaThread::last_Java_fp_offset()),
3033 (int32_t)0);
3034 __ jcc(Assembler::equal, L);
3035 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3036 __ bind(L);
3037 }
3038 #endif // ASSERT
3039 __ mov(c_rarg0, r15_thread);
3040 __ movl(c_rarg1, r14); // exec_mode
3041 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3042
3043 // Need to have an oopmap that tells fetch_unroll_info where to
3044 // find any register it might need.
3045 oop_maps->add_gc_map(__ pc() - start, map);
3046
3047 __ reset_last_Java_frame(false);
3048
3049 #if INCLUDE_JVMCI
3050 if (EnableJVMCI || UseAOT) {
3051 __ bind(after_fetch_unroll_info_call);
3052 }
3053 #endif
3054
3055 // Load UnrollBlock* into rdi
3056 __ mov(rdi, rax);
3057
3058 __ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
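// r14 now holds the unpack kind recorded in the UnrollBlock; it is compared
// against Unpack_exception just below and later passed to unpack_frames()
// as the exec mode.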
3059 Label noException;
3060 __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
3061 __ jcc(Assembler::notEqual, noException);
3062 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3063 // QQQ this is useless; it was NULL above
3064 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3065 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3066 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3067
3068 __ verify_oop(rax);
3069
3070 // Overwrite the result registers with the exception results.
3071 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3072 // I think this is useless
3073 __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3074
3075 __ bind(noException);
3076
3077 // Only register save data is on the stack.
3078 // Now restore the result registers. Everything else is either dead
3079 // or captured in the vframeArray.
3080 RegisterSaver::restore_result_registers(masm);
3081
3082 // All of the register save area has been popped off the stack. Only the
3083 // return address remains.
3084
3085 // Pop all the frames we must move/replace.
3086 //
3087 // Frame picture (youngest to oldest)
3088 // 1: self-frame (no frame link)
3089 // 2: deopting frame (no frame link)
3090 // 3: caller of deopting frame (could be compiled/interpreted).
3091 //
3092 // Note: by leaving the return address of self-frame on the stack
3093 // and using the size of frame 2 to adjust the stack
3094 // when we are done the return to frame 3 will still be on the stack.
3095
3096 // Pop deoptimized frame
3097 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3098 __ addptr(rsp, rcx);
3099
3100 // rsp should be pointing at the return address to the caller (3)
3101
3102 // Pick up the initial fp we should save
3103 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3104 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3105
3106 #ifdef ASSERT
3107 // Compilers generate code that bangs the stack by as much as the
3108 // interpreter would need. So this stack banging should never
3109 // trigger a fault. Verify that it does not on non-product builds.
3110 if (UseStackBanging) {
3111 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3112 __ bang_stack_size(rbx, rcx);
3113 }
3114 #endif
3115
3116 // Load address of array of frame pcs into rcx
3117 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3118
3119 // Trash the old pc
3120 __ addptr(rsp, wordSize);
3121
3122 // Load address of array of frame sizes into rsi
3123 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3124
3125 // Load counter into rdx
3126 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3127
3128 // Now adjust the caller's stack to make up for the extra locals,
3129 // but record the original sp so that we can save it in the skeletal interpreter
3130 // frame; the stack walking of interpreter_sender will then get the unextended sp
3131 // value rather than the "real" sp value.
3132
3133 const Register sender_sp = r8;
3134
3135 __ mov(sender_sp, rsp);
3136 __ movl(rbx, Address(rdi,
3137 Deoptimization::UnrollBlock::
3138 caller_adjustment_offset_in_bytes()));
3139 __ subptr(rsp, rbx);
3140
3141 // Push interpreter frames in a loop
3142 Label loop;
3143 __ bind(loop);
3144 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3145 __ subptr(rbx, 2*wordSize); // We'll push pc and rbp by hand
3146 __ pushptr(Address(rcx, 0)); // Save return address
3147 __ enter(); // Save old & set new rbp
3148 __ subptr(rsp, rbx); // Prolog
3149 // This value is corrected by layout_activation_impl
3150 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3151 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3152 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3153 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3154 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3155 __ decrementl(rdx); // Decrement counter
3156 __ jcc(Assembler::notZero, loop);
3157 __ pushptr(Address(rcx, 0)); // Save final return address
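// Each loop iteration built one skeletal interpreter frame: a return pc,
// a saved rbp, and (frame size - 2 words) of body that unpack_frames()
// will populate with the interpreter state.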
3158
3159 // Re-push self-frame
3160 __ enter(); // Save old & set new rbp
3161
3162 // Allocate a full sized register save area.
3163 // Return address and rbp are in place, so we allocate two fewer words.
3164 __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3165
3166 // Restore frame locals after moving the frame
3167 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3168 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3169
3170 // Call C code. Need thread but NOT official VM entry
3171 // crud. We cannot block on this call, no GC can happen. Call should
3172 // restore return values to their stack-slots with the new SP.
3173 //
3174 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3175
3176 // Use rbp because the frames look interpreted now
3177 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3178 // Don't need the precise return PC here, just precise enough to point into this code blob.
3179 address the_pc = __ pc();
3180 __ set_last_Java_frame(noreg, rbp, the_pc);
3181
3182 __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI
3183 __ mov(c_rarg0, r15_thread);
3184 __ movl(c_rarg1, r14); // second arg: exec_mode
3185 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3186 // Revert SP alignment after call since we're going to do some SP relative addressing below
3187 __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3188
3189 // Set an oopmap for the call site
3190 // Use the same PC we used for the last java frame
3191 oop_maps->add_gc_map(the_pc - start,
3192 new OopMap( frame_size_in_words, 0 ));
3193
3194 // Clear fp AND pc
3195 __ reset_last_Java_frame(true);
3196
3197 // Collect return values
3198 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3199 __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3200 // I think this is useless (throwing pc?)
3201 __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3202
3203 // Pop self-frame.
3204 __ leave(); // Epilog
3205
3206 // Jump to interpreter
3207 __ ret(0);
3208
3209 // Make sure all code is generated
3210 masm->flush();
3211
3212 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3213 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3214 #if INCLUDE_JVMCI
3215 if (EnableJVMCI || UseAOT) {
3216 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
3217 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
3218 }
3219 #endif
3220 }
3221
3222 #ifdef COMPILER2
3223 //------------------------------generate_uncommon_trap_blob--------------------
3224 void SharedRuntime::generate_uncommon_trap_blob() {
3225 // Allocate space for the code
3226 ResourceMark rm;
3227 // Setup code generation tools
3228 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3229 MacroAssembler* masm = new MacroAssembler(&buffer);
3230
3231 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3232
3233 address start = __ pc();
3234
3235 if (UseRTMLocking) {
3236 // Abort RTM transaction before possible nmethod deoptimization.
3237 __ xabort(0);
3238 }
3239
3240 // Push self-frame. We get here with a return address on the
3241 // stack, so rsp is 8-byte aligned until we allocate our frame.
3242 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!
3243
3244 // No callee saved registers. rbp is assumed implicitly saved
3245 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3246
3247 // The compiler left unloaded_class_index in j_rarg0; move it to where the
3248 // runtime expects it.
3249 __ movl(c_rarg1, j_rarg0);
3250
3251 __ set_last_Java_frame(noreg, noreg, NULL);
3252
3253 // Call C code. Need thread but NOT official VM entry
3254 // crud. We cannot block on this call, no GC can happen. Call should
3255 // capture callee-saved registers as well as return values.
3256 // Thread is in rdi already.
3257 //
3258 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index, jint exec_mode);
3259
3260 __ mov(c_rarg0, r15_thread);
3261 __ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
3262 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3263
3264 // Set an oopmap for the call site
3265 OopMapSet* oop_maps = new OopMapSet();
3266 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3267
3268 // location of rbp is known implicitly by the frame sender code
3269
3270 oop_maps->add_gc_map(__ pc() - start, map);
3271
3272 __ reset_last_Java_frame(false);
3273
3274 // Load UnrollBlock* into rdi
3275 __ mov(rdi, rax);
3276
3277 #ifdef ASSERT
3278 { Label L;
3279 __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
3280 (int32_t)Deoptimization::Unpack_uncommon_trap);
3281 __ jcc(Assembler::equal, L);
3282 __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
3283 __ bind(L);
3284 }
3285 #endif
3286
3287 // Pop all the frames we must move/replace.
3288 //
3289 // Frame picture (youngest to oldest)
3290 // 1: self-frame (no frame link)
3291 // 2: deopting frame (no frame link)
3292 // 3: caller of deopting frame (could be compiled/interpreted).
3293
3294 // Pop self-frame. We have no frame, and must rely only on rax and rsp.
3295 __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3296
3297 // Pop deoptimized frame (int)
3298 __ movl(rcx, Address(rdi,
3299 Deoptimization::UnrollBlock::
3300 size_of_deoptimized_frame_offset_in_bytes()));
3301 __ addptr(rsp, rcx);
3302
3303 // rsp should be pointing at the return address to the caller (3)
3304
3305 // Pick up the initial fp we should save
3306 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3307 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3308
3309 #ifdef ASSERT
3310 // Compilers generate code that bangs the stack by as much as the
3311 // interpreter would need. So this stack banging should never
3312 // trigger a fault. Verify that it does not on non-product builds.
3313 if (UseStackBanging) {
3314 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3315 __ bang_stack_size(rbx, rcx);
3316 }
3317 #endif
3318
3319 // Load address of array of frame pcs into rcx (address*)
3320 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3321
3322 // Trash the return pc
3323 __ addptr(rsp, wordSize);
3324
3325 // Load address of array of frame sizes into rsi (intptr_t*)
3326 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3327
3328 // Counter
3329 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); // (int)
3330
3331 // Now adjust the caller's stack to make up for the extra locals, but
3332 // record the original sp so that we can save it in the skeletal
3333 // interpreter frame; the stack walking of interpreter_sender will
3334 // then get the unextended sp value rather than the "real" sp value.
3335
3336 const Register sender_sp = r8;
3337
3338 __ mov(sender_sp, rsp);
3339 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); // (int)
3340 __ subptr(rsp, rbx);
3341
3342 // Push interpreter frames in a loop
3343 Label loop;
3344 __ bind(loop);
3345 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3346 __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
3347 __ pushptr(Address(rcx, 0)); // Save return address
3348 __ enter(); // Save old & set new rbp
3349 __ subptr(rsp, rbx); // Prolog
3350 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3351 sender_sp); // Make it walkable
3352 // This value is corrected by layout_activation_impl
3353 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3354 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3355 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3356 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3357 __ decrementl(rdx); // Decrement counter
3358 __ jcc(Assembler::notZero, loop);
3359 __ pushptr(Address(rcx, 0)); // Save final return address
3360
3361 // Re-push self-frame
3362 __ enter(); // Save old & set new rbp
3363 __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3364 // Prolog
3365
3366 // Use rbp because the frames look interpreted now
3367 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3368 // Don't need the precise return PC here, just precise enough to point into this code blob.
3369 address the_pc = __ pc();
3370 __ set_last_Java_frame(noreg, rbp, the_pc);
3371
3372 // Call C code. Need thread but NOT official VM entry
3373 // crud. We cannot block on this call, no GC can happen. Call should
3374 // restore return values to their stack-slots with the new SP.
3375 // Thread is in rdi already.
3376 //
3377 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3378
3379 __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3380 __ mov(c_rarg0, r15_thread);
3381 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3382 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3383
3384 // Set an oopmap for the call site
3385 // Use the same PC we used for the last java frame
3386 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3387
3388 // Clear fp AND pc
3389 __ reset_last_Java_frame(true);
3390
3391 // Pop self-frame.
3392 __ leave(); // Epilog
3393
3394 // Jump to interpreter
3395 __ ret(0);
3396
3397 // Make sure all code is generated
3398 masm->flush();
3399
3400 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3401 SimpleRuntimeFrame::framesize >> 1);
3402 }
3403 #endif // COMPILER2
3404
3405
3406 //------------------------------generate_handler_blob------
3407 //
3408 // Generate a special Compile2Runtime blob that saves all registers
3409 // and sets up an oopmap.
3410 //
3411 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3412 assert(StubRoutines::forward_exception_entry() != NULL,
3413 "must be generated before");
3414
3415 ResourceMark rm;
3416 OopMapSet *oop_maps = new OopMapSet();
3417 OopMap* map;
3418
3419 // Allocate space for the code. Setup code generation tools.
3420 CodeBuffer buffer("handler_blob", 2048, 1024);
3421 MacroAssembler* masm = new MacroAssembler(&buffer);
3422
3423 address start = __ pc();
3424 address call_pc = NULL;
3425 int frame_size_in_words;
3426 bool cause_return = (poll_type == POLL_AT_RETURN);
3427 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3428
3429 if (UseRTMLocking) {
3430 // Abort RTM transaction before calling runtime
3431 // because critical section will be large and will be
3432 // aborted anyway. Also nmethod could be deoptimized.
3433 __ xabort(0);
3434 }
3435
3436 // Make room for return address (or push it again)
3437 if (!cause_return) {
3438 __ push(rbx);
3439 }
3440
3441 // Save registers, fpu state, and flags
3442 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3443
3444 // The following is basically a call_VM. However, we need the precise
3445 // address of the call in order to generate an oopmap. Hence, we do all the
3446 // work ourselves.
3447
3448 __ set_last_Java_frame(noreg, noreg, NULL);
3449
3450 // The return address must always be correct so that frame constructor never
3451 // sees an invalid pc.
3452
3453 if (!cause_return) {
3454 // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
3455 // Additionally, rbx is a callee saved register and we can look at it later to determine
3456 // if someone changed the return address for us!
3457 __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3458 __ movptr(Address(rbp, wordSize), rbx);
3459 }
3460
3461 // Do the call
3462 __ mov(c_rarg0, r15_thread);
3463 __ call(RuntimeAddress(call_ptr));
3464
3465 // Set an oopmap for the call site. This oopmap will map all
3466 // oop-registers and debug-info registers as callee-saved. This
3467 // will allow deoptimization at this safepoint to find all possible
3468 // debug-info recordings, as well as let GC find all oops.
3469
3470 oop_maps->add_gc_map( __ pc() - start, map);
3471
3472 Label noException;
3473
3474 __ reset_last_Java_frame(false);
3475
3476 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3477 __ jcc(Assembler::equal, noException);
3478
3479 // Exception pending
3480
3481 RegisterSaver::restore_live_registers(masm, save_vectors);
3482
3483 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3484
3485 // No exception case
3486 __ bind(noException);
3487
3488 Label no_adjust;
3489 #ifdef ASSERT
3490 Label bail;
3491 #endif
3492 if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
3493 Label no_prefix, not_special;
3494
3495 // If our stashed return pc was modified by the runtime we avoid touching it
3496 __ cmpptr(rbx, Address(rbp, wordSize));
3497 __ jccb(Assembler::notEqual, no_adjust);
3498
3499 // Skip over the poll instruction.
3500 // See NativeInstruction::is_safepoint_poll()
3501 // Possible encodings:
3502 // 85 00 test %eax,(%rax)
3503 // 85 01 test %eax,(%rcx)
3504 // 85 02 test %eax,(%rdx)
3505 // 85 03 test %eax,(%rbx)
3506 // 85 06 test %eax,(%rsi)
3507 // 85 07 test %eax,(%rdi)
3508 //
3509 // 41 85 00 test %eax,(%r8)
3510 // 41 85 01 test %eax,(%r9)
3511 // 41 85 02 test %eax,(%r10)
3512 // 41 85 03 test %eax,(%r11)
3513 // 41 85 06 test %eax,(%r14)
3514 // 41 85 07 test %eax,(%r15)
3515 //
3516 // 85 04 24 test %eax,(%rsp)
3517 // 41 85 04 24 test %eax,(%r12)
3518 // 85 45 00 test %eax,0x0(%rbp)
3519 // 41 85 45 00 test %eax,0x0(%r13)
3520
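// The code below therefore steps the saved pc over an optional REX.B
// prefix (0x41) and over an optional SIB/disp8 byte (modrm base encodings
// 0x04/0x05), and then over the two-byte opcode + modrm itself, so that
// the adjusted return pc resumes execution just after the poll.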
3521 __ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
3522 __ jcc(Assembler::notEqual, no_prefix);
3523 __ addptr(rbx, 1);
3524 __ bind(no_prefix);
3525 #ifdef ASSERT
3526 __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
3527 #endif
3528 // r12/r13/rsp/rbp base encoding takes 3 bytes with the following register values:
3529 // r12/rsp 0x04
3530 // r13/rbp 0x05
3531 __ movzbq(rcx, Address(rbx, 1));
3532 __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
3533 __ subptr(rcx, 4); // looking for 0x00 .. 0x01
3534 __ cmpptr(rcx, 1);
3535 __ jcc(Assembler::above, not_special);
3536 __ addptr(rbx, 1);
3537 __ bind(not_special);
3538 #ifdef ASSERT
3539 // Verify the correct encoding of the poll we're about to skip.
3540 __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
3541 __ jcc(Assembler::notEqual, bail);
3542 // Mask out the modrm bits
3543 __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
3544 // rax encodes to 0, so if the bits are nonzero it's incorrect
3545 __ jcc(Assembler::notZero, bail);
3546 #endif
3547 // Adjust return pc forward to step over the safepoint poll instruction
3548 __ addptr(rbx, 2);
3549 __ movptr(Address(rbp, wordSize), rbx);
3550 }
3551
3552 __ bind(no_adjust);
3553 // Normal exit, restore registers and exit.
3554 RegisterSaver::restore_live_registers(masm, save_vectors);
3555 __ ret(0);
3556
3557 #ifdef ASSERT
3558 __ bind(bail);
3559 __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
3560 #endif
3561
3562 // Make sure all code is generated
3563 masm->flush();
3564
3565 // Fill-out other meta info
3566 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3567 }
3568
3569 //
3570 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3571 //
3572 // Generate a stub that calls into the vm to find out the proper destination
3573 // of a java call. All the argument registers are live at this point,
3574 // but since this is generic code we don't know what they are and the caller
3575 // must do any gc of the args.
3576 //
3577 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3578 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3579
3580 // allocate space for the code
3581 ResourceMark rm;
3582
3583 CodeBuffer buffer(name, 1000, 512);
3584 MacroAssembler* masm = new MacroAssembler(&buffer);
3585
3586 int frame_size_in_words;
3587
3588 OopMapSet *oop_maps = new OopMapSet();
3589 OopMap* map = NULL;
3590
3591 int start = __ offset();
3592
3593 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3594
3595 int frame_complete = __ offset();
3596
3597 __ set_last_Java_frame(noreg, noreg, NULL);
3598
3599 __ mov(c_rarg0, r15_thread);
3600
3601 __ call(RuntimeAddress(destination));
3602
3603
3604 // Set an oopmap for the call site.
3605 // We need this not only for callee-saved registers, but also for volatile
3606 // registers that the compiler might be keeping live across a safepoint.
3607
3608 oop_maps->add_gc_map( __ offset() - start, map);
3609
3610 // rax contains the address we are going to jump to, assuming no exception got installed
3611
3612 // clear last_Java_sp
3613 __ reset_last_Java_frame(false);
3614 // check for pending exceptions
3615 Label pending;
3616 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3617 __ jcc(Assembler::notEqual, pending);
3618
3619 // get the returned Method*
3620 __ get_vm_result_2(rbx, r15_thread);
3621 __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3622
3623 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3624
3625 RegisterSaver::restore_live_registers(masm);
3626
3627 // We are back to the original state on entry and ready to go.
3628
3629 __ jmp(rax);
3630
3631 // Pending exception after the safepoint
3632
3633 __ bind(pending);
3634
3635 RegisterSaver::restore_live_registers(masm);
3636
3637 // exception pending => remove activation and forward to exception handler
3638
3639 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3640
3641 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3642 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3643
3644 // -------------
3645 // make sure all code is generated
3646 masm->flush();
3647
3648 // return the blob
3649 // the frame size is given in words (frame_size_in_words)
3650 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3651 }
3652
3653
3654 //------------------------------Montgomery multiplication------------------------
3655 //
3656
3657 #ifndef _WINDOWS
3658
3659 #define ASM_SUBTRACT
3660
3661 #ifdef ASM_SUBTRACT
3662 // Subtract 0:b from carry:a. Return carry.
3663 static unsigned long
3664 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
3665 long i = 0, cnt = len;
3666 unsigned long tmp;
3667 asm volatile("clc; "
3668 "0: ; "
3669 "mov (%[b], %[i], 8), %[tmp]; "
3670 "sbb %[tmp], (%[a], %[i], 8); "
3671 "inc %[i]; dec %[cnt]; "
3672 "jne 0b; "
3673 "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3674 : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3675 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3676 : "memory");
3677 return tmp;
3678 }
3679 #else // ASM_SUBTRACT
3680 typedef int __attribute__((mode(TI))) int128;
3681
3682 // Subtract 0:b from carry:a. Return carry.
3683 static unsigned long
3684 sub(unsigned long a[], unsigned long b[], unsigned long carry, int len) {
3685 int128 tmp = 0;
3686 int i;
3687 for (i = 0; i < len; i++) {
3688 tmp += a[i];
3689 tmp -= b[i];
3690 a[i] = tmp;
3691 tmp >>= 64;
3692 assert(-1 <= tmp && tmp <= 0, "invariant");
3693 }
3694 return tmp + carry;
3695 }
3696 #endif // ! ASM_SUBTRACT
3697
3698 // Multiply (unsigned) Long A by Long B, accumulating the double-
3699 // length result into the accumulator formed of T0, T1, and T2.
3700 #define MACC(A, B, T0, T1, T2) \
3701 do { \
3702 unsigned long hi, lo; \
3703 __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
3704 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
3705 : "r"(A), "a"(B) : "cc"); \
3706 } while(0)
3707
3708 // As above, but add twice the double-length result into the
3709 // accumulator.
3710 #define MACC2(A, B, T0, T1, T2) \
3711 do { \
3712 unsigned long hi, lo; \
3713 __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4; " \
3714 "add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
3715 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
3716 : "r"(A), "a"(B) : "cc"); \
3717 } while(0)
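// For example, MACC(A, B, T0, T1, T2) computes {T2:T1:T0} += A*B: with the
// accumulator zeroed and A = B = 2^63, the 128-bit product is 2^126, which
// leaves T0 == 0 and T1 == 2^62. MACC2 accumulates the same product twice,
// which the squaring code uses for the off-diagonal terms a[i]*a[j].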
3718
3719 // Fast Montgomery multiplication. The derivation of the algorithm is
3720 // in A Cryptographic Library for the Motorola DSP56000,
3721 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
3722
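// Given inv == -n[0]^-1 mod 2^64 (checked by the assert below), this
// routine computes m = a * b * R^-1 mod n where R = 2^(64*len): on each
// iteration a multiple of n is added so that the low accumulator word
// becomes zero, keeping the running sum exactly divisible by 2^64.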
3723 static void __attribute__((noinline))
3724 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
3725 unsigned long m[], unsigned long inv, int len) {
3726 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3727 int i;
3728
3729 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3730
3731 for (i = 0; i < len; i++) {
3732 int j;
3733 for (j = 0; j < i; j++) {
3734 MACC(a[j], b[i-j], t0, t1, t2);
3735 MACC(m[j], n[i-j], t0, t1, t2);
3736 }
3737 MACC(a[i], b[0], t0, t1, t2);
3738 m[i] = t0 * inv;
3739 MACC(m[i], n[0], t0, t1, t2);
3740
3741 assert(t0 == 0, "broken Montgomery multiply");
3742
3743 t0 = t1; t1 = t2; t2 = 0;
3744 }
3745
3746 for (i = len; i < 2*len; i++) {
3747 int j;
3748 for (j = i-len+1; j < len; j++) {
3749 MACC(a[j], b[i-j], t0, t1, t2);
3750 MACC(m[j], n[i-j], t0, t1, t2);
3751 }
3752 m[i-len] = t0;
3753 t0 = t1; t1 = t2; t2 = 0;
3754 }
3755
3756 while (t0)
3757 t0 = sub(m, n, t0, len);
3758 }
3759
3760 // Fast Montgomery squaring. This uses asymptotically 25% fewer
3761 // multiplies so it should be up to 25% faster than Montgomery
3762 // multiplication. However, its loop control is more complex and it
3763 // may actually run slower on some machines.
3764
3765 static void __attribute__((noinline))
3766 montgomery_square(unsigned long a[], unsigned long n[],
3767 unsigned long m[], unsigned long inv, int len) {
3768 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3769 int i;
3770
3771 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3772
3773 for (i = 0; i < len; i++) {
3774 int j;
3775 int end = (i+1)/2;
3776 for (j = 0; j < end; j++) {
3777 MACC2(a[j], a[i-j], t0, t1, t2);
3778 MACC(m[j], n[i-j], t0, t1, t2);
3779 }
3780 if ((i & 1) == 0) {
3781 MACC(a[j], a[j], t0, t1, t2);
3782 }
3783 for (; j < i; j++) {
3784 MACC(m[j], n[i-j], t0, t1, t2);
3785 }
3786 m[i] = t0 * inv;
3787 MACC(m[i], n[0], t0, t1, t2);
3788
3789 assert(t0 == 0, "broken Montgomery square");
3790
3791 t0 = t1; t1 = t2; t2 = 0;
3792 }
3793
3794 for (i = len; i < 2*len; i++) {
3795 int start = i-len+1;
3796 int end = start + (len - start)/2;
3797 int j;
3798 for (j = start; j < end; j++) {
3799 MACC2(a[j], a[i-j], t0, t1, t2);
3800 MACC(m[j], n[i-j], t0, t1, t2);
3801 }
3802 if ((i & 1) == 0) {
3803 MACC(a[j], a[j], t0, t1, t2);
3804 }
3805 for (; j < len; j++) {
3806 MACC(m[j], n[i-j], t0, t1, t2);
3807 }
3808 m[i-len] = t0;
3809 t0 = t1; t1 = t2; t2 = 0;
3810 }
3811
3812 while (t0)
3813 t0 = sub(m, n, t0, len);
3814 }
3815
3816 // Swap words in a longword.
3817 static unsigned long swap(unsigned long x) {
3818 return (x << 32) | (x >> 32);
3819 }
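// For example, swap(0x0000000100000002UL) == 0x0000000200000001UL.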
3820
3821 // Copy len longwords from s to d, word-swapping as we go. The
3822 // destination array is reversed.
3823 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
3824 d += len;
3825 while(len-- > 0) {
3826 d--;
3827 *d = swap(*s);
3828 s++;
3829 }
3830 }
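// For example, with len == 2, s == { 0x0000000100000002, 0x0000000300000004 }
// yields d == { 0x0000000400000003, 0x0000000200000001 }: the most
// significant jint of s ends up as the low half of d[len-1].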
3831
3832 // The threshold at which squaring is advantageous was determined
3833 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
3834 #define MONTGOMERY_SQUARING_THRESHOLD 64
3835
3836 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
3837 jint len, jlong inv,
3838 jint *m_ints) {
3839 assert(len % 2 == 0, "array length in montgomery_multiply must be even");
3840 int longwords = len/2;
3841
3842 // Make very sure we don't use so much space that the stack might
3843 // overflow. 512 jints corresponds to a 16384-bit integer and
3844 // will use a total of 8K bytes of stack space here (256 longwords * 8 bytes * 4 scratch arrays).
3845 int total_allocation = longwords * sizeof (unsigned long) * 4;
3846 guarantee(total_allocation <= 8192, "must be");
3847 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3848
3849 // Local scratch arrays
3850 unsigned long
3851 *a = scratch + 0 * longwords,
3852 *b = scratch + 1 * longwords,
3853 *n = scratch + 2 * longwords,
3854 *m = scratch + 3 * longwords;
3855
3856 reverse_words((unsigned long *)a_ints, a, longwords);
3857 reverse_words((unsigned long *)b_ints, b, longwords);
3858 reverse_words((unsigned long *)n_ints, n, longwords);
3859
3860 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
3861
3862 reverse_words(m, (unsigned long *)m_ints, longwords);
3863 }
3864
3865 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
3866 jint len, jlong inv,
3867 jint *m_ints) {
3868 assert(len % 2 == 0, "array length in montgomery_square must be even");
3869 int longwords = len/2;
3870
3871 // Make very sure we don't use so much space that the stack might
3872 // overflow. 512 jints corresponds to a 16384-bit integer and
3873 // will use a total of 6K bytes of stack space here (256 longwords * 8 bytes * 3 scratch arrays).
3874 int total_allocation = longwords * sizeof (unsigned long) * 3;
3875 guarantee(total_allocation <= 8192, "must be");
3876 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3877
3878 // Local scratch arrays
3879 unsigned long
3880 *a = scratch + 0 * longwords,
3881 *n = scratch + 1 * longwords,
3882 *m = scratch + 2 * longwords;
3883
3884 reverse_words((unsigned long *)a_ints, a, longwords);
3885 reverse_words((unsigned long *)n_ints, n, longwords);
3886
3887 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3888 ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
3889 } else {
3890 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
3891 }
3892
3893 reverse_words(m, (unsigned long *)m_ints, longwords);
3894 }
3895
3896 #endif // !_WINDOWS
3897
3898 #ifdef COMPILER2
3899 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
3900 //
3901 //------------------------------generate_exception_blob---------------------------
3902 // Creates the exception blob at the end.
3903 // Compiled methods jump to this code when they have an exception to deliver
3904 // (see emit_exception_handler in the x86_64.ad file).
3905 //
3906 // Given an exception pc at a call we call into the runtime for the
3907 // handler in this method. This handler might merely restore state
3908 // (i.e. callee save registers) unwind the frame and jump to the
3909 // exception handler for the nmethod if there is no Java level handler
3910 // for the nmethod.
3911 //
3912 // This code is entered with a jmp.
3913 //
3914 // Arguments:
3915 // rax: exception oop
3916 // rdx: exception pc
3917 //
3918 // Results:
3919 // rax: exception oop
3920 // rdx: exception pc in caller or ???
3921 // destination: exception handler of caller
3922 //
3923 // Note: the exception pc MUST be at a call (precise debug information)
3924 // Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
3925 //
3926
3927 void OptoRuntime::generate_exception_blob() {
3928 assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
3929 assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
3930 assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
3931
3932 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3933
3934 // Allocate space for the code
3935 ResourceMark rm;
3936 // Setup code generation tools
3937 CodeBuffer buffer("exception_blob", 2048, 1024);
3938 MacroAssembler* masm = new MacroAssembler(&buffer);
3939
3940
3941 address start = __ pc();
3942
3943 // Exception pc is 'return address' for stack walker
3944 __ push(rdx);
3945 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
3946
3947 // Save callee-saved registers. See x86_64.ad.
3948
3949 // rbp is an implicitly saved callee saved register (i.e., the calling
3950 // convention will save/restore it in the prolog/epilog). Other than that
3951 // there are no callee save registers now that adapter frames are gone.
3952
3953 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3954
3955 // Store exception in Thread object. We cannot pass any arguments to the
3956 // handle_exception call, since we do not want to make any assumption
3957 // about the size of the frame where the exception happened in.
3958 // c_rarg0 is either rdi (Linux) or rcx (Windows).
3959 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
3960 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3961
3962 // This call does all the hard work. It checks if an exception handler
3963 // exists in the method.
3964 // If so, it returns the handler address.
3965 // If not, it prepares for stack-unwinding, restoring the callee-save
3966 // registers of the frame being removed.
3967 //
3968 // address OptoRuntime::handle_exception_C(JavaThread* thread)
3969
3970 // At a method handle call, the stack may not be properly aligned
3971 // when returning with an exception.
3972 address the_pc = __ pc();
3973 __ set_last_Java_frame(noreg, noreg, the_pc);
3974 __ mov(c_rarg0, r15_thread);
3975 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
3976 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3977
3978 // Set an oopmap for the call site. This oopmap will only be used if we
3979 // are unwinding the stack. Hence, all locations will be dead.
3980 // Callee-saved registers will be the same as the frame above (i.e.,
3981 // handle_exception_stub), since they were restored when we got the
3982 // exception.
3983
3984 OopMapSet* oop_maps = new OopMapSet();
3985
3986 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3987
3988 __ reset_last_Java_frame(false);
3989
3990 // Restore callee-saved registers
3991
3992 // rbp is an implicitly saved callee-saved register (i.e., the calling
3993 // convention will save/restore it in the prolog/epilog). Other than that
3994 // there are no callee save registers now that adapter frames are gone.
3995
3996 __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
3997
3998 __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
3999 __ pop(rdx); // No need for exception pc anymore
4000
4001 // rax: exception handler
4002
4003 // We have a handler in rax (could be deopt blob).
4004 __ mov(r8, rax);
4005
4006 // Get the exception oop
4007 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4008 // Get the exception pc in case we are deoptimized
4009 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4010 #ifdef ASSERT
4011 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4012 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4013 #endif
4014 // Clear the exception oop so GC no longer processes it as a root.
4015 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4016
4017 // rax: exception oop
4018 // r8: exception handler
4019 // rdx: exception pc
4020 // Jump to handler
4021
4022 __ jmp(r8);
4023
4024 // Make sure all code is generated
4025 masm->flush();
4026
4027 // Set exception blob
4028 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4029 }
4030 #endif // COMPILER2
4031