1 /*
2 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #if !defined(_WINDOWS) && !defined(_BSDONLY_SOURCE)
27 #include "alloca.h"
28 #endif
29 #ifdef _BSDONLY_SOURCE
30 #include <stdlib.h>
31 #endif
32 #include "asm/macroAssembler.hpp"
33 #include "asm/macroAssembler.inline.hpp"
34 #include "code/debugInfoRec.hpp"
35 #include "code/icBuffer.hpp"
36 #include "code/nativeInst.hpp"
37 #include "code/vtableStubs.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gcLocker.hpp"
40 #include "gc/shared/barrierSet.hpp"
41 #include "gc/shared/barrierSetAssembler.hpp"
42 #include "interpreter/interpreter.hpp"
43 #include "logging/log.hpp"
44 #include "memory/resourceArea.hpp"
45 #include "memory/universe.hpp"
46 #include "oops/compiledICHolder.hpp"
47 #include "runtime/safepointMechanism.hpp"
48 #include "runtime/sharedRuntime.hpp"
49 #include "runtime/vframeArray.hpp"
50 #include "runtime/vm_version.hpp"
51 #include "utilities/align.hpp"
52 #include "utilities/formatBuffer.hpp"
53 #include "vmreg_x86.inline.hpp"
54 #ifdef COMPILER1
55 #include "c1/c1_Runtime1.hpp"
56 #endif
57 #ifdef COMPILER2
58 #include "opto/runtime.hpp"
59 #endif
60 #if INCLUDE_JVMCI
61 #include "jvmci/jvmciJavaClasses.hpp"
62 #endif
63
64 #define __ masm->
65
66 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
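// (e.g. with 16-byte stack alignment and 4-byte VMReg stack slots this works out to 4 slots)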
67
68 class SimpleRuntimeFrame {
69
70 public:
71
72 // Most of the runtime stubs have this simple frame layout.
73 // This class exists to make the layout shared in one place.
74 // Offsets are for compiler stack slots, which are jints.
75 enum layout {
76 // The frame sender code expects that rbp will be in the "natural" place and
77 // will override any oopMap setting for it. We must therefore force the layout
78 // so that it agrees with the frame sender code.
79 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
80 rbp_off2,
81 return_off, return_off2,
82 framesize
83 };
84 };
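// A worked example of the layout above (assuming frame::arg_reg_save_area_bytes is 0,
// as on the System V AMD64 ABI): rbp_off = 0, return_off = 2 and framesize = 4 jint slots,
// i.e. exactly the two words a plain enter() leaves on the stack (saved rbp plus return address).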
85
86 class RegisterSaver {
87 // Capture info about frame layout. Layout offsets are in jint
88 // units because compiler frame slots are jints.
89 #define XSAVE_AREA_BEGIN 160
90 #define XSAVE_AREA_YMM_BEGIN 576
91 #define XSAVE_AREA_ZMM_BEGIN 1152
92 #define XSAVE_AREA_UPPERBANK 1664
93 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
94 #define DEF_YMM_OFFS(regnum) ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
95 #define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
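// For example (with BytesPerInt == 4), DEF_XMM_OFFS(1) expands to
//   xmm1_off = xmm_off + 4, xmm1H_off
// Only the first two offsets of each bank are spelled out below; the save/restore
// code derives the per-register stride from the difference of two consecutive offsets.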
96 enum layout {
97 fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
98 xmm_off = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt, // offset in fxsave save area
99 DEF_XMM_OFFS(0),
100 DEF_XMM_OFFS(1),
101 // 2..15 are implied in range usage
102 ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
103 DEF_YMM_OFFS(0),
104 DEF_YMM_OFFS(1),
105 // 2..15 are implied in range usage
106 zmm_high = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
107 zmm_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
108 DEF_ZMM_OFFS(16),
109 DEF_ZMM_OFFS(17),
110 // 18..31 are implied in range usage
111 fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
112 fpu_stateH_end,
113 r15_off, r15H_off,
114 r14_off, r14H_off,
115 r13_off, r13H_off,
116 r12_off, r12H_off,
117 r11_off, r11H_off,
118 r10_off, r10H_off,
119 r9_off, r9H_off,
120 r8_off, r8H_off,
121 rdi_off, rdiH_off,
122 rsi_off, rsiH_off,
123 ignore_off, ignoreH_off, // extra copy of rbp
124 rsp_off, rspH_off,
125 rbx_off, rbxH_off,
126 rdx_off, rdxH_off,
127 rcx_off, rcxH_off,
128 rax_off, raxH_off,
129 // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
130 align_off, alignH_off,
131 flags_off, flagsH_off,
132 // The frame sender code expects that rbp will be in the "natural" place and
133 // will override any oopMap setting for it. We must therefore force the layout
134 // so that it agrees with the frame sender code.
135 rbp_off, rbpH_off, // copy of rbp we will restore
136 return_off, returnH_off, // slot for return address
137 reg_save_size // size in compiler stack slots
138 };
139
140 public:
141 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
142 static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
143
144 // Offsets into the register save area
145 // Used by deoptimization when it is managing result register
146 // values on its own
147
148 static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
149 static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
150 static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
151 static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
152 static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
153
154 // During deoptimization only the result registers need to be restored,
155 // all the other values have already been extracted.
156 static void restore_result_registers(MacroAssembler* masm);
157 };
158
159 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
160 int off = 0;
161 int num_xmm_regs = XMMRegisterImpl::number_of_registers;
162 if (UseAVX < 3) {
163 num_xmm_regs = num_xmm_regs/2;
164 }
165 #if COMPILER2_OR_JVMCI
166 if (save_vectors) {
167 assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
168 assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
169 }
170 #else
171 assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
172 #endif
173
174 // Always make the frame size 16-byte aligned; both vector and non-vector stacks are allocated that way.
175 int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
176 // OopMap frame size is in compiler stack slots (jint's) not bytes or words
177 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
178 // CodeBlob frame size is in words.
179 int frame_size_in_words = frame_size_in_bytes / wordSize;
180 *total_frame_words = frame_size_in_words;
181
182 // Save registers, fpu state, and flags.
183 // We assume caller has already pushed the return address onto the
184 // stack, so rsp is 8-byte aligned here.
185 // We push rbp twice in this sequence because we want the real rbp
186 // to be under the return address like a normal enter would leave it.
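// After enter() and push_CPU_state() the stack matches the 'layout' enum above: the
// fxsave/xsave area sits at the lowest offsets, the general-purpose registers and flags
// above it, and the saved rbp plus the return address at the top of the frame.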
187
188 __ enter(); // rsp becomes 16-byte aligned here
189 __ push_CPU_state(); // Push a multiple of 16 bytes
190
191 // push cpu state handles this on EVEX enabled targets
192 if (save_vectors) {
193 // Save upper half of YMM registers(0..15)
194 int base_addr = XSAVE_AREA_YMM_BEGIN;
195 for (int n = 0; n < 16; n++) {
196 __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
197 }
198 if (VM_Version::supports_evex()) {
199 // Save upper half of ZMM registers(0..15)
200 base_addr = XSAVE_AREA_ZMM_BEGIN;
201 for (int n = 0; n < 16; n++) {
202 __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
203 }
204 // Save full ZMM registers(16..num_xmm_regs)
205 base_addr = XSAVE_AREA_UPPERBANK;
206 off = 0;
207 int vector_len = Assembler::AVX_512bit;
208 for (int n = 16; n < num_xmm_regs; n++) {
209 __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
210 }
211 }
212 } else {
213 if (VM_Version::supports_evex()) {
214 // Save upper bank of ZMM registers(16..31) for double/float usage
215 int base_addr = XSAVE_AREA_UPPERBANK;
216 off = 0;
217 for (int n = 16; n < num_xmm_regs; n++) {
218 __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
219 }
220 }
221 }
222 __ vzeroupper();
223 if (frame::arg_reg_save_area_bytes != 0) {
224 // Allocate argument register save area
225 __ subptr(rsp, frame::arg_reg_save_area_bytes);
226 }
227
228 // Set an oopmap for the call site. This oopmap will map all
229 // oop-registers and debug-info registers as callee-saved. This
230 // will allow deoptimization at this safepoint to find all possible
231 // debug-info recordings, as well as let GC find all oops.
232
233 OopMapSet *oop_maps = new OopMapSet();
234 OopMap* map = new OopMap(frame_size_in_slots, 0);
235
236 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x))
237
238 map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
239 map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
240 map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
241 map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
242 // rbp location is known implicitly by the frame sender code, needs no oopmap
243 // and the location where rbp was saved is ignored
244 map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
245 map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
246 map->set_callee_saved(STACK_OFFSET( r8_off ), r8->as_VMReg());
247 map->set_callee_saved(STACK_OFFSET( r9_off ), r9->as_VMReg());
248 map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
249 map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
250 map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
251 map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
252 map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
253 map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
254 // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15,
255 // on EVEX enabled targets, we get it included in the xsave area
256 off = xmm0_off;
257 int delta = xmm1_off - off;
258 for (int n = 0; n < 16; n++) {
259 XMMRegister xmm_name = as_XMMRegister(n);
260 map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
261 off += delta;
262 }
263 if(UseAVX > 2) {
264 // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
265 off = zmm16_off;
266 delta = zmm17_off - off;
267 for (int n = 16; n < num_xmm_regs; n++) {
268 XMMRegister zmm_name = as_XMMRegister(n);
269 map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
270 off += delta;
271 }
272 }
273
274 #if COMPILER2_OR_JVMCI
275 if (save_vectors) {
276 off = ymm0_off;
277 int delta = ymm1_off - off;
278 for (int n = 0; n < 16; n++) {
279 XMMRegister ymm_name = as_XMMRegister(n);
280 map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
281 off += delta;
282 }
283 }
284 #endif // COMPILER2_OR_JVMCI
285
286 // %%% These should all be a waste but we'll keep things as they were for now
287 if (true) {
288 map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
289 map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
290 map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
291 map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
292 // rbp location is known implicitly by the frame sender code, needs no oopmap
293 map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
294 map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
295 map->set_callee_saved(STACK_OFFSET( r8H_off ), r8->as_VMReg()->next());
296 map->set_callee_saved(STACK_OFFSET( r9H_off ), r9->as_VMReg()->next());
297 map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
298 map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
299 map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
300 map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
301 map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
302 map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
303 // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15,
304 // on EVEX enabled targets, we get it included in the xsave area
305 off = xmm0H_off;
306 delta = xmm1H_off - off;
307 for (int n = 0; n < 16; n++) {
308 XMMRegister xmm_name = as_XMMRegister(n);
309 map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
310 off += delta;
311 }
312 if (UseAVX > 2) {
313 // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
314 off = zmm16H_off;
315 delta = zmm17H_off - off;
316 for (int n = 16; n < num_xmm_regs; n++) {
317 XMMRegister zmm_name = as_XMMRegister(n);
318 map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
319 off += delta;
320 }
321 }
322 }
323
324 return map;
325 }
326
327 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
328 int num_xmm_regs = XMMRegisterImpl::number_of_registers;
329 if (UseAVX < 3) {
330 num_xmm_regs = num_xmm_regs/2;
331 }
332 if (frame::arg_reg_save_area_bytes != 0) {
333 // Pop arg register save area
334 __ addptr(rsp, frame::arg_reg_save_area_bytes);
335 }
336
337 #if COMPILER2_OR_JVMCI
338 if (restore_vectors) {
339 assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
340 assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
341 }
342 #else
343 assert(!restore_vectors, "vectors are generated only by C2");
344 #endif
345
346 __ vzeroupper();
347
348 // On EVEX enabled targets everything is handled in pop fpu state
349 if (restore_vectors) {
350 // Restore upper half of YMM registers (0..15)
351 int base_addr = XSAVE_AREA_YMM_BEGIN;
352 for (int n = 0; n < 16; n++) {
353 __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
354 }
355 if (VM_Version::supports_evex()) {
356 // Restore upper half of ZMM registers (0..15)
357 base_addr = XSAVE_AREA_ZMM_BEGIN;
358 for (int n = 0; n < 16; n++) {
359 __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
360 }
361 // Restore full ZMM registers(16..num_xmm_regs)
362 base_addr = XSAVE_AREA_UPPERBANK;
363 int vector_len = Assembler::AVX_512bit;
364 int off = 0;
365 for (int n = 16; n < num_xmm_regs; n++) {
366 __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
367 }
368 }
369 } else {
370 if (VM_Version::supports_evex()) {
371 // Restore upper bank of ZMM registers(16..31) for double/float usage
372 int base_addr = XSAVE_AREA_UPPERBANK;
373 int off = 0;
374 for (int n = 16; n < num_xmm_regs; n++) {
375 __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
376 }
377 }
378 }
379
380 // Recover CPU state
381 __ pop_CPU_state();
382 // Get the rbp described implicitly by the calling convention (no oopMap)
383 __ pop(rbp);
384 }
385
386 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
387
388 // Just restore result register. Only used by deoptimization. By
389 // now any callee save register that needs to be restored to a c2
390 // caller of the deoptee has been extracted into the vframeArray
391 // and will be stuffed into the c2i adapter we create for later
392 // restoration so only result registers need to be restored here.
393
394 // Restore fp result register
395 __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
396 // Restore integer result register
397 __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
398 __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));
399
400 // Pop all of the register save area off the stack except the return address
401 __ addptr(rsp, return_offset_in_bytes());
402 }
403
404 // Is the vector's size (in bytes) bigger than the size saved by default?
405 // 16-byte XMM registers are saved by default using fxsave/fxrstor instructions.
406 bool SharedRuntime::is_wide_vector(int size) {
407 return size > 16;
408 }
409
410 size_t SharedRuntime::trampoline_size() {
411 return 16;
412 }
413
414 void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
415 __ jump(RuntimeAddress(destination));
416 }
417
418 // The java_calling_convention describes stack locations as ideal slots on
419 // a frame with no abi restrictions. Since we must observe abi restrictions
420 // (like the placement of the register window) the slots must be biased by
421 // the following value.
422 static int reg2offset_in(VMReg r) {
423 // Account for saved rbp and return address
424 // This should really be in_preserve_stack_slots
425 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
426 }
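// For example, with the 4-slot bias above the first incoming stack argument
// (reg2stack() == 0) is addressed as Address(rbp, 16): rbp holds the saved rbp,
// rbp + 8 holds the return address, and the caller's outgoing args start at rbp + 16.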
427
428 static int reg2offset_out(VMReg r) {
429 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
430 }
431
432 // ---------------------------------------------------------------------------
433 // Read the array of BasicTypes from a signature, and compute where the
434 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
435 // quantities. Values less than VMRegImpl::stack0 are registers, those above
436 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer
437 // as framesizes are fixed.
438 // VMRegImpl::stack0 refers to the first slot 0(sp).
439 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
440 // 0 up to RegisterImpl::number_of_registers are the 64-bit
441 // integer registers.
442
443 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
444 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
445 // units regardless of build. Of course for i486 there is no 64 bit build
446
447 // The Java calling convention is a "shifted" version of the C ABI.
448 // By skipping the first C ABI register we can call non-static jni methods
449 // with small numbers of arguments without having to shuffle the arguments
450 // at all. Since we control the java ABI we ought to at least get some
451 // advantage out of it.
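//
// A worked example (hypothetical static method taking (long, int, double), i.e.
// sig_bt = { T_LONG, T_VOID, T_INT, T_DOUBLE, T_VOID }): the long gets a full
// VMRegPair on j_rarg0, its trailing T_VOID half is set_bad(), the int gets
// j_rarg1, the double gets j_farg0, and stk_args stays 0, so the method needs
// no outgoing stack slots.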
452
453 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
454 VMRegPair *regs,
455 int total_args_passed,
456 int is_outgoing) {
457
458 // Create the mapping between argument positions and
459 // registers.
460 static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
461 j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
462 };
463 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
464 j_farg0, j_farg1, j_farg2, j_farg3,
465 j_farg4, j_farg5, j_farg6, j_farg7
466 };
467
468
469 uint int_args = 0;
470 uint fp_args = 0;
471 uint stk_args = 0; // inc by 2 each time
472
473 for (int i = 0; i < total_args_passed; i++) {
474 switch (sig_bt[i]) {
475 case T_BOOLEAN:
476 case T_CHAR:
477 case T_BYTE:
478 case T_SHORT:
479 case T_INT:
480 if (int_args < Argument::n_int_register_parameters_j) {
481 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
482 } else {
483 regs[i].set1(VMRegImpl::stack2reg(stk_args));
484 stk_args += 2;
485 }
486 break;
487 case T_VOID:
488 // halves of T_LONG or T_DOUBLE
489 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
490 regs[i].set_bad();
491 break;
492 case T_LONG:
493 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
494 // fall through
495 case T_OBJECT:
496 case T_ARRAY:
497 case T_ADDRESS:
498 if (int_args < Argument::n_int_register_parameters_j) {
499 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
500 } else {
501 regs[i].set2(VMRegImpl::stack2reg(stk_args));
502 stk_args += 2;
503 }
504 break;
505 case T_FLOAT:
506 if (fp_args < Argument::n_float_register_parameters_j) {
507 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
508 } else {
509 regs[i].set1(VMRegImpl::stack2reg(stk_args));
510 stk_args += 2;
511 }
512 break;
513 case T_DOUBLE:
514 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
515 if (fp_args < Argument::n_float_register_parameters_j) {
516 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
517 } else {
518 regs[i].set2(VMRegImpl::stack2reg(stk_args));
519 stk_args += 2;
520 }
521 break;
522 default:
523 ShouldNotReachHere();
524 break;
525 }
526 }
527
528 return align_up(stk_args, 2);
529 }
530
531 // Patch the callers callsite with entry to compiled code if it exists.
532 static void patch_callers_callsite(MacroAssembler *masm) {
533 Label L;
534 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
535 __ jcc(Assembler::equal, L);
536
537 // Save the current stack pointer
538 __ mov(r13, rsp);
539 // Schedule the branch target address early.
540 // Call into the VM to patch the caller, then jump to compiled callee
541 // rax isn't live so capture return address while we easily can
542 __ movptr(rax, Address(rsp, 0));
543
544 // align stack so push_CPU_state doesn't fault
545 __ andptr(rsp, -(StackAlignmentInBytes));
546 __ push_CPU_state();
547 __ vzeroupper();
548 // VM needs caller's callsite
549 // VM needs target method
550 // This needs to be a long call since we will relocate this adapter to
551 // the codeBuffer and it may not reach
552
553 // Allocate argument register save area
554 if (frame::arg_reg_save_area_bytes != 0) {
555 __ subptr(rsp, frame::arg_reg_save_area_bytes);
556 }
557 __ mov(c_rarg0, rbx);
558 __ mov(c_rarg1, rax);
559 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
560
561 // De-allocate argument register save area
562 if (frame::arg_reg_save_area_bytes != 0) {
563 __ addptr(rsp, frame::arg_reg_save_area_bytes);
564 }
565
566 __ vzeroupper();
567 __ pop_CPU_state();
568 // restore sp
569 __ mov(rsp, r13);
570 __ bind(L);
571 }
572
573
574 static void gen_c2i_adapter(MacroAssembler *masm,
575 int total_args_passed,
576 int comp_args_on_stack,
577 const BasicType *sig_bt,
578 const VMRegPair *regs,
579 Label& skip_fixup) {
580 // Before we get into the guts of the C2I adapter, see if we should be here
581 // at all. We've come from compiled code and are attempting to jump to the
582 // interpreter, which means the caller made a static call to get here
583 // (vcalls always get a compiled target if there is one). Check for a
584 // compiled target. If there is one, we need to patch the caller's call.
585 patch_callers_callsite(masm);
586
587 __ bind(skip_fixup);
588
589 // Since all args are passed on the stack, total_args_passed *
590 // Interpreter::stackElementSize is the space we need. Plus one word because
591 // we also account for the return address location, since
592 // we store it first rather than holding it in rax across all the shuffling.
593
594 int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
595
596 // stack is aligned, keep it that way
597 extraspace = align_up(extraspace, 2*wordSize);
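// (e.g. with four Java argument slots and an 8-byte stackElementSize this is
// 4*8 + 8 = 40 bytes, rounded up to 48)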
598
599 // Get return address
600 __ pop(rax);
601
602 // set senderSP value
603 __ mov(r13, rsp);
604
605 __ subptr(rsp, extraspace);
606
607 // Store the return address in the expected location
608 __ movptr(Address(rsp, 0), rax);
609
610 // Now write the args into the outgoing interpreter space
611 for (int i = 0; i < total_args_passed; i++) {
612 if (sig_bt[i] == T_VOID) {
613 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
614 continue;
615 }
616
617 // offset to start parameters
618 int st_off = (total_args_passed - i) * Interpreter::stackElementSize;
619 int next_off = st_off - Interpreter::stackElementSize;
620
621 // Say 4 args:
622 // i st_off
623 // 0 32 T_LONG
624 // 1 24 T_VOID
625 // 2 16 T_OBJECT
626 // 3 8 T_BOOL
627 // - 0 return address
628 //
629 // However, to make things extra confusing: because we can fit a long/double in
630 // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
631 // leaves one slot empty and only stores to a single slot. In this case the
632 // slot that is occupied is the T_VOID slot. See, I said it was confusing.
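// In the example above, the 64-bit long value is written once, at next_off == 24
// (the slot labelled T_VOID), while its own slot at st_off == 32 is left unused
// (and overwritten with known junk in debug builds).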
633
634 VMReg r_1 = regs[i].first();
635 VMReg r_2 = regs[i].second();
636 if (!r_1->is_valid()) {
637 assert(!r_2->is_valid(), "");
638 continue;
639 }
640 if (r_1->is_stack()) {
641 // memory to memory use rax
642 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
643 if (!r_2->is_valid()) {
644 // sign extend??
645 __ movl(rax, Address(rsp, ld_off));
646 __ movptr(Address(rsp, st_off), rax);
647
648 } else {
649
650 __ movq(rax, Address(rsp, ld_off));
651
652 // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
653 // T_DOUBLE and T_LONG use two slots in the interpreter
654 if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
655 // ld_off == LSW, ld_off+wordSize == MSW
656 // st_off == MSW, next_off == LSW
657 __ movq(Address(rsp, next_off), rax);
658 #ifdef ASSERT
659 // Overwrite the unused slot with known junk
660 __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
661 __ movptr(Address(rsp, st_off), rax);
662 #endif /* ASSERT */
663 } else {
664 __ movq(Address(rsp, st_off), rax);
665 }
666 }
667 } else if (r_1->is_Register()) {
668 Register r = r_1->as_Register();
669 if (!r_2->is_valid()) {
670 // must be only an int (or smaller), so move only 32 bits to the slot
671 // why not sign extend??
672 __ movl(Address(rsp, st_off), r);
673 } else {
674 // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
675 // T_DOUBLE and T_LONG use two slots in the interpreter
676 if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
677 // long/double in gpr
678 #ifdef ASSERT
679 // Overwrite the unused slot with known junk
680 __ mov64(rax, CONST64(0xdeadffffdeadaaab));
681 __ movptr(Address(rsp, st_off), rax);
682 #endif /* ASSERT */
683 __ movq(Address(rsp, next_off), r);
684 } else {
685 __ movptr(Address(rsp, st_off), r);
686 }
687 }
688 } else {
689 assert(r_1->is_XMMRegister(), "");
690 if (!r_2->is_valid()) {
691 // only a float; use just part of the slot
692 __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
693 } else {
694 #ifdef ASSERT
695 // Overwrite the unused slot with known junk
696 __ mov64(rax, CONST64(0xdeadffffdeadaaac));
697 __ movptr(Address(rsp, st_off), rax);
698 #endif /* ASSERT */
699 __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
700 }
701 }
702 }
703
704 // Schedule the branch target address early.
705 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
706 __ jmp(rcx);
707 }
708
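// Emit a range check on pc_reg: branch to L_ok if code_start < pc_reg < code_end,
// otherwise fall through, so the caller can emit its failure handling (e.g. a stop())
// right after this call and bind L_ok after that.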
709 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
710 address code_start, address code_end,
711 Label& L_ok) {
712 Label L_fail;
713 __ lea(temp_reg, ExternalAddress(code_start));
714 __ cmpptr(pc_reg, temp_reg);
715 __ jcc(Assembler::belowEqual, L_fail);
716 __ lea(temp_reg, ExternalAddress(code_end));
717 __ cmpptr(pc_reg, temp_reg);
718 __ jcc(Assembler::below, L_ok);
719 __ bind(L_fail);
720 }
721
722 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
723 int total_args_passed,
724 int comp_args_on_stack,
725 const BasicType *sig_bt,
726 const VMRegPair *regs) {
727
728 // Note: r13 contains the senderSP on entry. We must preserve it since
729 // we may do an i2c -> c2i transition if we lose a race where compiled
730 // code goes non-entrant while we get args ready.
731 // In addition we use r13 to locate all the interpreter args, because
732 // we must align the stack to 16 bytes on an i2c entry; otherwise we
733 // lose the alignment we expect in all compiled code, and the register
734 // save code can segv when fxsave instructions find an improperly
735 // aligned stack pointer.
736
737 // Adapters can be frameless because they do not require the caller
738 // to perform additional cleanup work, such as correcting the stack pointer.
739 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
740 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
741 // even if a callee has modified the stack pointer.
742 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
743 // routinely repairs its caller's stack pointer (from sender_sp, which is set
744 // up via the senderSP register).
745 // In other words, if *either* the caller or callee is interpreted, we can
746 // get the stack pointer repaired after a call.
747 // This is why c2i and i2c adapters cannot be indefinitely composed.
748 // In particular, if a c2i adapter were to somehow call an i2c adapter,
749 // both caller and callee would be compiled methods, and neither would
750 // clean up the stack pointer changes performed by the two adapters.
751 // If this happens, control eventually transfers back to the compiled
752 // caller, but with an uncorrected stack, causing delayed havoc.
753
754 // Pick up the return address
755 __ movptr(rax, Address(rsp, 0));
756
757 if (VerifyAdapterCalls &&
758 (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
759 // So, let's test for cascading c2i/i2c adapters right now.
760 // assert(Interpreter::contains($return_addr) ||
761 // StubRoutines::contains($return_addr),
762 // "i2c adapter must return to an interpreter frame");
763 __ block_comment("verify_i2c { ");
764 Label L_ok;
765 if (Interpreter::code() != NULL)
766 range_check(masm, rax, r11,
767 Interpreter::code()->code_start(), Interpreter::code()->code_end(),
768 L_ok);
769 if (StubRoutines::code1() != NULL)
770 range_check(masm, rax, r11,
771 StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
772 L_ok);
773 if (StubRoutines::code2() != NULL)
774 range_check(masm, rax, r11,
775 StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
776 L_ok);
777 const char* msg = "i2c adapter must return to an interpreter frame";
778 __ block_comment(msg);
779 __ stop(msg);
780 __ bind(L_ok);
781 __ block_comment("} verify_i2ce ");
782 }
783
784 // Must preserve original SP for loading incoming arguments because
785 // we need to align the outgoing SP for compiled code.
786 __ movptr(r11, rsp);
787
788 // Cut-out for having no stack args. Since up to 6 int/oop args are passed
789 // in registers, we will occasionally have no stack args.
790 int comp_words_on_stack = 0;
791 if (comp_args_on_stack) {
792 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
793 // registers are below. By subtracting stack0, we either get a negative
794 // number (all values in registers) or the maximum stack slot accessed.
795
796 // Convert 4-byte c2 stack slots to words.
797 comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
798 // Round up to minimum stack alignment, in wordSize
799 comp_words_on_stack = align_up(comp_words_on_stack, 2);
800 __ subptr(rsp, comp_words_on_stack * wordSize);
801 }
802
803
804 // Ensure compiled code always sees stack at proper alignment
805 __ andptr(rsp, -16);
806
807 // Push the return address and misalign the stack so that the youngest frame always
808 // sees it the way it would just after the placement of the call instruction.
809 __ push(rax);
810
811 // Put saved SP in another register
812 const Register saved_sp = rax;
813 __ movptr(saved_sp, r11);
814
815 // Will jump to the compiled code just as if compiled code was doing it.
816 // Pre-load the register-jump target early, to schedule it better.
817 __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
818
819 #if INCLUDE_JVMCI
820 if (EnableJVMCI || UseAOT) {
821 // check if this call should be routed towards a specific entry point
822 __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
823 Label no_alternative_target;
824 __ jcc(Assembler::equal, no_alternative_target);
825 __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
826 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
827 __ bind(no_alternative_target);
828 }
829 #endif // INCLUDE_JVMCI
830
831 // Now generate the shuffle code. Pick up all register args and move the
832 // rest through the floating point stack top.
833 for (int i = 0; i < total_args_passed; i++) {
834 if (sig_bt[i] == T_VOID) {
835 // Longs and doubles are passed in native word order, but misaligned
836 // in the 32-bit build.
837 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
838 continue;
839 }
840
841 // Pick up 0, 1 or 2 words from SP+offset.
842
843 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
844 "scrambled load targets?");
845 // Load in argument order going down.
846 int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
847 // Point to interpreter value (vs. tag)
848 int next_off = ld_off - Interpreter::stackElementSize;
849 //
850 //
851 //
852 VMReg r_1 = regs[i].first();
853 VMReg r_2 = regs[i].second();
854 if (!r_1->is_valid()) {
855 assert(!r_2->is_valid(), "");
856 continue;
857 }
858 if (r_1->is_stack()) {
859 // Convert stack slot to an SP offset (+ wordSize to account for return address )
860 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
861
862 // We can use r13 as a temp here because compiled code doesn't need r13 as an input
863 // and if we end up going thru a c2i because of a miss a reasonable value of r13
864 // will be generated.
865 if (!r_2->is_valid()) {
866 // sign extend???
867 __ movl(r13, Address(saved_sp, ld_off));
868 __ movptr(Address(rsp, st_off), r13);
869 } else {
870 //
871 // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
872 // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
873 // So we must adjust where to pick up the data to match the interpreter.
874 //
875 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
876 // are accessed as negative so LSW is at LOW address
877
878 // ld_off is MSW so get LSW
879 const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
880 next_off : ld_off;
881 __ movq(r13, Address(saved_sp, offset));
882 // st_off is LSW (i.e. reg.first())
883 __ movq(Address(rsp, st_off), r13);
884 }
885 } else if (r_1->is_Register()) { // Register argument
886 Register r = r_1->as_Register();
887 assert(r != rax, "must be different");
888 if (r_2->is_valid()) {
889 //
890 // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
891 // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
892 // So we must adjust where to pick up the data to match the interpreter.
893
894 const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
895 next_off : ld_off;
896
897 // this can be a misaligned move
898 __ movq(r, Address(saved_sp, offset));
899 } else {
900 // sign extend and use a full word?
901 __ movl(r, Address(saved_sp, ld_off));
902 }
903 } else {
904 if (!r_2->is_valid()) {
905 __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
906 } else {
907 __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
908 }
909 }
910 }
911
912 // 6243940 We might end up in handle_wrong_method if
913 // the callee is deoptimized as we race thru here. If that
914 // happens we don't want to take a safepoint because the
915 // caller frame will look interpreted and arguments are now
916 // "compiled" so it is much better to make this transition
917 // invisible to the stack walking code. Unfortunately if
918 // we try and find the callee by normal means a safepoint
919 // is possible. So we stash the desired callee in the thread
920 // and the VM will find it there should this case occur.
921
922 __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
923
924 // Put the Method* where a c2i would expect it, should we end up there.
925 // Only needed because c2 resolve stubs return the Method* as a result in
926 // rax.
927 __ mov(rax, rbx);
928 __ jmp(r11);
929 }
930
931 // ---------------------------------------------------------------
932 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
933 int total_args_passed,
934 int comp_args_on_stack,
935 const BasicType *sig_bt,
936 const VMRegPair *regs,
937 AdapterFingerPrint* fingerprint) {
938 address i2c_entry = __ pc();
939
940 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
941
942 // -------------------------------------------------------------------------
943 // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
944 // to the interpreter. The args start out packed in the compiled layout. They
945 // need to be unpacked into the interpreter layout. This will almost always
946 // require some stack space. We grow the current (compiled) stack, then repack
947 // the args. We finally end in a jump to the generic interpreter entry point.
948 // On exit from the interpreter, the interpreter will restore our SP (lest the
949 // compiled code, which relies solely on SP and not RBP, get sick).
950
951 address c2i_unverified_entry = __ pc();
952 Label skip_fixup;
953 Label ok;
954
955 Register holder = rax;
956 Register receiver = j_rarg0;
957 Register temp = rbx;
958
959 {
960 __ load_klass(temp, receiver);
961 __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
962 __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
963 __ jcc(Assembler::equal, ok);
964 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
965
966 __ bind(ok);
967 // Method might have been compiled since the call site was patched to
968 // interpreted; if that is the case treat it as a miss so we can get
969 // the call site corrected.
970 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
971 __ jcc(Assembler::equal, skip_fixup);
972 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
973 }
974
975 address c2i_entry = __ pc();
976
977 // Class initialization barrier for static methods
978 address c2i_no_clinit_check_entry = NULL;
979 if (VM_Version::supports_fast_class_init_checks()) {
980 Label L_skip_barrier;
981 Register method = rbx;
982
983 { // Bypass the barrier for non-static methods
984 Register flags = rscratch1;
985 __ movl(flags, Address(method, Method::access_flags_offset()));
986 __ testl(flags, JVM_ACC_STATIC);
987 __ jcc(Assembler::zero, L_skip_barrier); // non-static
988 }
989
990 Register klass = rscratch1;
991 __ load_method_holder(klass, method);
992 __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
993
994 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
995
996 __ bind(L_skip_barrier);
997 c2i_no_clinit_check_entry = __ pc();
998 }
999
1000 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1001 bs->c2i_entry_barrier(masm);
1002
1003 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
1004
1005 __ flush();
1006 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
1007 }
1008
1009 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1010 VMRegPair *regs,
1011 VMRegPair *regs2,
1012 int total_args_passed) {
1013 assert(regs2 == NULL, "not needed on x86");
1014 // We return the amount of VMRegImpl stack slots we need to reserve for all
1015 // the arguments NOT counting out_preserve_stack_slots.
1016
1017 // NOTE: These arrays will have to change when c1 is ported
1018 #ifdef _WIN64
1019 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1020 c_rarg0, c_rarg1, c_rarg2, c_rarg3
1021 };
1022 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1023 c_farg0, c_farg1, c_farg2, c_farg3
1024 };
1025 #else
1026 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1027 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
1028 };
1029 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1030 c_farg0, c_farg1, c_farg2, c_farg3,
1031 c_farg4, c_farg5, c_farg6, c_farg7
1032 };
1033 #endif // _WIN64
1034
1035
1036 uint int_args = 0;
1037 uint fp_args = 0;
1038 uint stk_args = 0; // inc by 2 each time
1039
1040 for (int i = 0; i < total_args_passed; i++) {
1041 switch (sig_bt[i]) {
1042 case T_BOOLEAN:
1043 case T_CHAR:
1044 case T_BYTE:
1045 case T_SHORT:
1046 case T_INT:
1047 if (int_args < Argument::n_int_register_parameters_c) {
1048 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1049 #ifdef _WIN64
1050 fp_args++;
1051 // Allocate slots for the callee to stuff register args on the stack.
1052 stk_args += 2;
1053 #endif
1054 } else {
1055 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1056 stk_args += 2;
1057 }
1058 break;
1059 case T_LONG:
1060 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1061 // fall through
1062 case T_OBJECT:
1063 case T_ARRAY:
1064 case T_ADDRESS:
1065 case T_METADATA:
1066 if (int_args < Argument::n_int_register_parameters_c) {
1067 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1068 #ifdef _WIN64
1069 fp_args++;
1070 stk_args += 2;
1071 #endif
1072 } else {
1073 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1074 stk_args += 2;
1075 }
1076 break;
1077 case T_FLOAT:
1078 if (fp_args < Argument::n_float_register_parameters_c) {
1079 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1080 #ifdef _WIN64
1081 int_args++;
1082 // Allocate slots for the callee to stuff register args on the stack.
1083 stk_args += 2;
1084 #endif
1085 } else {
1086 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1087 stk_args += 2;
1088 }
1089 break;
1090 case T_DOUBLE:
1091 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1092 if (fp_args < Argument::n_float_register_parameters_c) {
1093 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
1094 #ifdef _WIN64
1095 int_args++;
1096 // Allocate slots for the callee to stuff register args on the stack.
1097 stk_args += 2;
1098 #endif
1099 } else {
1100 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1101 stk_args += 2;
1102 }
1103 break;
1104 case T_VOID: // Halves of longs and doubles
1105 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1106 regs[i].set_bad();
1107 break;
1108 default:
1109 ShouldNotReachHere();
1110 break;
1111 }
1112 }
1113 #ifdef _WIN64
1114 // The Windows ABI requires that we always allocate enough stack space
1115 // for 4 64-bit registers to be stored down.
1116 if (stk_args < 8) {
1117 stk_args = 8;
1118 }
1119 #endif // _WIN64
1120
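// A worked example (hypothetical native signature (jint, jlong, jdouble)): on the
// System V ABI the args land in c_rarg0, c_rarg1 and c_farg0 and we return 0 slots;
// on Win64 the double lands in c_farg2 (register slots are positional across int and
// float args) and each register arg still reserves two shadow slots, so we return
// the 8-slot minimum.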
1121 return stk_args;
1122 }
1123
1124 // On 64-bit we will store integer-like items to the stack as
1125 // 64-bit items (SPARC ABI) even though Java would only store
1126 // 32 bits for a parameter. On 32-bit it will simply be 32 bits,
1127 // so this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
1128 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1129 if (src.first()->is_stack()) {
1130 if (dst.first()->is_stack()) {
1131 // stack to stack
1132 __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
1133 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1134 } else {
1135 // stack to reg
1136 __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1137 }
1138 } else if (dst.first()->is_stack()) {
1139 // reg to stack
1140 // Do we really have to sign extend???
1141 // __ movslq(src.first()->as_Register(), src.first()->as_Register());
1142 __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1143 } else {
1144 // Do we really have to sign extend???
1145 // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
1146 if (dst.first() != src.first()) {
1147 __ movq(dst.first()->as_Register(), src.first()->as_Register());
1148 }
1149 }
1150 }
1151
1152 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1153 if (src.first()->is_stack()) {
1154 if (dst.first()->is_stack()) {
1155 // stack to stack
1156 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1157 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1158 } else {
1159 // stack to reg
1160 __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1161 }
1162 } else if (dst.first()->is_stack()) {
1163 // reg to stack
1164 __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1165 } else {
1166 if (dst.first() != src.first()) {
1167 __ movq(dst.first()->as_Register(), src.first()->as_Register());
1168 }
1169 }
1170 }
1171
1172 // An oop arg. Must pass a handle not the oop itself
1173 static void object_move(MacroAssembler* masm,
1174 OopMap* map,
1175 int oop_handle_offset,
1176 int framesize_in_slots,
1177 VMRegPair src,
1178 VMRegPair dst,
1179 bool is_receiver,
1180 int* receiver_offset) {
1181
1182 // must pass a handle. First figure out the location we use as a handle
1183
1184 Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
1185
1186 // See if the oop is NULL; if it is, we need no handle
1187
1188 if (src.first()->is_stack()) {
1189
1190 // Oop is already on the stack as an argument
1191 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1192 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1193 if (is_receiver) {
1194 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1195 }
1196
1197 __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
1198 __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1199 // conditionally move a NULL
1200 __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
1201 } else {
1202
1203 // Oop is in a register; we must store it to the space we reserve
1204 // on the stack for oop_handles and pass a handle if the oop is non-NULL
1205
1206 const Register rOop = src.first()->as_Register();
1207 int oop_slot;
1208 if (rOop == j_rarg0)
1209 oop_slot = 0;
1210 else if (rOop == j_rarg1)
1211 oop_slot = 1;
1212 else if (rOop == j_rarg2)
1213 oop_slot = 2;
1214 else if (rOop == j_rarg3)
1215 oop_slot = 3;
1216 else if (rOop == j_rarg4)
1217 oop_slot = 4;
1218 else {
1219 assert(rOop == j_rarg5, "wrong register");
1220 oop_slot = 5;
1221 }
1222
1223 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
1224 int offset = oop_slot*VMRegImpl::stack_slot_size;
1225
1226 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1227 // Store oop in handle area, may be NULL
1228 __ movptr(Address(rsp, offset), rOop);
1229 if (is_receiver) {
1230 *receiver_offset = offset;
1231 }
1232
1233 __ cmpptr(rOop, (int32_t)NULL_WORD);
1234 __ lea(rHandle, Address(rsp, offset));
1235 // conditionally move a NULL from the handle area where it was just stored
1236 __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
1237 }
1238
1239 // If the arg is on the stack then place it, otherwise it is already in the correct reg.
1240 if (dst.first()->is_stack()) {
1241 __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1242 }
1243 }
1244
1245 // A float arg may have to do float reg int reg conversion
1246 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1247 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1248
1249 // The calling convention assures us that each VMRegPair is either
1250 // entirely one physical register or adjacent stack slots.
1251 // This greatly simplifies the cases here compared to sparc.
1252
1253 if (src.first()->is_stack()) {
1254 if (dst.first()->is_stack()) {
1255 __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1256 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1257 } else {
1258 // stack to reg
1259 assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
1260 __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
1261 }
1262 } else if (dst.first()->is_stack()) {
1263 // reg to stack
1264 assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
1265 __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1266 } else {
1267 // reg to reg
1268 // In theory these overlap but the ordering is such that this is likely a nop
1269 if ( src.first() != dst.first()) {
1270 __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
1271 }
1272 }
1273 }
1274
1275 // A long move
1276 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1277
1278 // The calling convention assures us that each VMRegPair is either
1279 // entirely one physical register or adjacent stack slots.
1280 // This greatly simplifies the cases here compared to sparc.
1281
1282 if (src.is_single_phys_reg() ) {
1283 if (dst.is_single_phys_reg()) {
1284 if (dst.first() != src.first()) {
1285 __ mov(dst.first()->as_Register(), src.first()->as_Register());
1286 }
1287 } else {
1288 assert(dst.is_single_reg(), "not a stack pair");
1289 __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1290 }
1291 } else if (dst.is_single_phys_reg()) {
1292 assert(src.is_single_reg(), "not a stack pair");
1293 __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
1294 } else {
1295 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
1296 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1297 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1298 }
1299 }
1300
1301 // A double move
1302 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1303
1304 // The calling convention assures us that each VMRegPair is either
1305 // entirely one physical register or adjacent stack slots.
1306 // This greatly simplifies the cases here compared to sparc.
1307
1308 if (src.is_single_phys_reg() ) {
1309 if (dst.is_single_phys_reg()) {
1310 // In theory these overlap but the ordering is such that this is likely a nop
1311 if ( src.first() != dst.first()) {
1312 __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
1313 }
1314 } else {
1315 assert(dst.is_single_reg(), "not a stack pair");
1316 __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1317 }
1318 } else if (dst.is_single_phys_reg()) {
1319 assert(src.is_single_reg(), "not a stack pair");
1320 __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
1321 } else {
1322 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
1323 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1324 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1325 }
1326 }
1327
1328
1329 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1330 // We always ignore the frame_slots arg and just use the space just below the frame pointer,
1331 // which by this time is free to use.
1332 switch (ret_type) {
1333 case T_FLOAT:
1334 __ movflt(Address(rbp, -wordSize), xmm0);
1335 break;
1336 case T_DOUBLE:
1337 __ movdbl(Address(rbp, -wordSize), xmm0);
1338 break;
1339 case T_VOID: break;
1340 default: {
1341 __ movptr(Address(rbp, -wordSize), rax);
1342 }
1343 }
1344 }
1345
1346 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1347 // We always ignore the frame_slots arg and just use the space just below the frame pointer,
1348 // which by this time is free to use.
1349 switch (ret_type) {
1350 case T_FLOAT:
1351 __ movflt(xmm0, Address(rbp, -wordSize));
1352 break;
1353 case T_DOUBLE:
1354 __ movdbl(xmm0, Address(rbp, -wordSize));
1355 break;
1356 case T_VOID: break;
1357 default: {
1358 __ movptr(rax, Address(rbp, -wordSize));
1359 }
1360 }
1361 }
1362
1363 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1364 for ( int i = first_arg ; i < arg_count ; i++ ) {
1365 if (args[i].first()->is_Register()) {
1366 __ push(args[i].first()->as_Register());
1367 } else if (args[i].first()->is_XMMRegister()) {
1368 __ subptr(rsp, 2*wordSize);
1369 __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
1370 }
1371 }
1372 }
1373
1374 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1375 for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1376 if (args[i].first()->is_Register()) {
1377 __ pop(args[i].first()->as_Register());
1378 } else if (args[i].first()->is_XMMRegister()) {
1379 __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
1380 __ addptr(rsp, 2*wordSize);
1381 }
1382 }
1383 }
1384
1385
1386 static void save_or_restore_arguments(MacroAssembler* masm,
1387 const int stack_slots,
1388 const int total_in_args,
1389 const int arg_save_area,
1390 OopMap* map,
1391 VMRegPair* in_regs,
1392 BasicType* in_sig_bt) {
1393 // if map is non-NULL then the code should store the values,
1394 // otherwise it should load them.
1395 int slot = arg_save_area;
1396 // Save down double word first
1397 for ( int i = 0; i < total_in_args; i++) {
1398 if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1399 int offset = slot * VMRegImpl::stack_slot_size;
1400 slot += VMRegImpl::slots_per_word;
1401 assert(slot <= stack_slots, "overflow");
1402 if (map != NULL) {
1403 __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1404 } else {
1405 __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1406 }
1407 }
1408 if (in_regs[i].first()->is_Register() &&
1409 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
1410 int offset = slot * VMRegImpl::stack_slot_size;
1411 if (map != NULL) {
1412 __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
1413 if (in_sig_bt[i] == T_ARRAY) {
1414 map->set_oop(VMRegImpl::stack2reg(slot));
1415 }
1416 } else {
1417 __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
1418 }
1419 slot += VMRegImpl::slots_per_word;
1420 }
1421 }
1422 // Save or restore single word registers
1423 for ( int i = 0; i < total_in_args; i++) {
1424 if (in_regs[i].first()->is_Register()) {
1425 int offset = slot * VMRegImpl::stack_slot_size;
1426 slot++;
1427 assert(slot <= stack_slots, "overflow");
1428
1429 // Value is in an input register; we must flush it to the stack
1430 const Register reg = in_regs[i].first()->as_Register();
1431 switch (in_sig_bt[i]) {
1432 case T_BOOLEAN:
1433 case T_CHAR:
1434 case T_BYTE:
1435 case T_SHORT:
1436 case T_INT:
1437 if (map != NULL) {
1438 __ movl(Address(rsp, offset), reg);
1439 } else {
1440 __ movl(reg, Address(rsp, offset));
1441 }
1442 break;
1443 case T_ARRAY:
1444 case T_LONG:
1445 // handled above
1446 break;
1447 case T_OBJECT:
1448 default: ShouldNotReachHere();
1449 }
1450 } else if (in_regs[i].first()->is_XMMRegister()) {
1451 if (in_sig_bt[i] == T_FLOAT) {
1452 int offset = slot * VMRegImpl::stack_slot_size;
1453 slot++;
1454 assert(slot <= stack_slots, "overflow");
1455 if (map != NULL) {
1456 __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1457 } else {
1458 __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1459 }
1460 }
1461 } else if (in_regs[i].first()->is_stack()) {
1462 if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1463 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1464 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1465 }
1466 }
1467 }
1468 }
1469
1470 // Pin object, return pinned object or null in rax
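// Used by generate_native_wrapper for critical natives when the heap supports object
// pinning: array arguments are pinned in place (instead of blocking on the GCLocker)
// before their body pointer is handed to the native code.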
1471 static void gen_pin_object(MacroAssembler* masm,
1472 VMRegPair reg) {
1473 __ block_comment("gen_pin_object {");
1474
1475 // rax always contains oop, either incoming or
1476 // pinned.
1477 Register tmp_reg = rax;
1478
1479 Label is_null;
1480 VMRegPair tmp;
1481 VMRegPair in_reg = reg;
1482
1483 tmp.set_ptr(tmp_reg->as_VMReg());
1484 if (reg.first()->is_stack()) {
1485 // Load the arg up from the stack
1486 move_ptr(masm, reg, tmp);
1487 reg = tmp;
1488 } else {
1489 __ movptr(rax, reg.first()->as_Register());
1490 }
1491 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1492 __ jccb(Assembler::equal, is_null);
1493
1494 if (reg.first()->as_Register() != c_rarg1) {
1495 __ movptr(c_rarg1, reg.first()->as_Register());
1496 }
1497
1498 __ call_VM_leaf(
1499 CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
1500 r15_thread, c_rarg1);
1501
1502 __ bind(is_null);
1503 __ block_comment("} gen_pin_object");
1504 }
1505
1506 // Unpin object
1507 static void gen_unpin_object(MacroAssembler* masm,
1508 VMRegPair reg) {
1509 __ block_comment("gen_unpin_object {");
1510 Label is_null;
1511
1512 if (reg.first()->is_stack()) {
1513 __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
1514 } else if (reg.first()->as_Register() != c_rarg1) {
1515 __ movptr(c_rarg1, reg.first()->as_Register());
1516 }
1517
1518 __ testptr(c_rarg1, c_rarg1);
1519 __ jccb(Assembler::equal, is_null);
1520
1521 __ call_VM_leaf(
1522 CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
1523 r15_thread, c_rarg1);
1524
1525 __ bind(is_null);
1526 __ block_comment("} gen_unpin_object");
1527 }
1528
1529 // Check GCLocker::needs_gc and enter the runtime if it's true. This
1530 // keeps a new JNI critical region from starting until a GC has been
1531 // forced. Save down any oops in registers and describe them in an
1532 // OopMap.
1533 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1534 int stack_slots,
1535 int total_c_args,
1536 int total_in_args,
1537 int arg_save_area,
1538 OopMapSet* oop_maps,
1539 VMRegPair* in_regs,
1540 BasicType* in_sig_bt) {
1541 __ block_comment("check GCLocker::needs_gc");
1542 Label cont;
1543 __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1544 __ jcc(Assembler::equal, cont);
1545
1546 // Save down any incoming oops and call into the runtime to halt for a GC
1547
1548 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1549 save_or_restore_arguments(masm, stack_slots, total_in_args,
1550 arg_save_area, map, in_regs, in_sig_bt);
1551
1552 address the_pc = __ pc();
1553 oop_maps->add_gc_map( __ offset(), map);
1554 __ set_last_Java_frame(rsp, noreg, the_pc);
1555
1556 __ block_comment("block_for_jni_critical");
1557 __ movptr(c_rarg0, r15_thread);
1558 __ mov(r12, rsp); // remember sp
1559 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1560 __ andptr(rsp, -16); // align stack as required by ABI
1561 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1562 __ mov(rsp, r12); // restore sp
1563 __ reinit_heapbase();
1564
1565 __ reset_last_Java_frame(false);
1566
1567 save_or_restore_arguments(masm, stack_slots, total_in_args,
1568 arg_save_area, NULL, in_regs, in_sig_bt);
1569 __ bind(cont);
1570 #ifdef ASSERT
1571 if (StressCriticalJNINatives) {
1572 // Stress register saving
1573 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1574 save_or_restore_arguments(masm, stack_slots, total_in_args,
1575 arg_save_area, map, in_regs, in_sig_bt);
1576 // Destroy argument registers
1577 for (int i = 0; i < total_in_args - 1; i++) {
1578 if (in_regs[i].first()->is_Register()) {
1579 const Register reg = in_regs[i].first()->as_Register();
1580 __ xorptr(reg, reg);
1581 } else if (in_regs[i].first()->is_XMMRegister()) {
1582 __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1583 } else if (in_regs[i].first()->is_FloatRegister()) {
1584 ShouldNotReachHere();
1585 } else if (in_regs[i].first()->is_stack()) {
1586 // Nothing to do
1587 } else {
1588 ShouldNotReachHere();
1589 }
1590 if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1591 i++;
1592 }
1593 }
1594
1595 save_or_restore_arguments(masm, stack_slots, total_in_args,
1596 arg_save_area, NULL, in_regs, in_sig_bt);
1597 }
1598 #endif
1599 }
1600
1601 // Unpack an array argument into a pointer to the body and the length
1602 // if the array is non-null, otherwise pass 0 for both.
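// For example, a non-null jint[] argument reaches the critical native as an
// (int length, jint* body) pair, where body points just past the array header at
// arrayOopDesc::base_offset_in_bytes(T_INT); a null array is passed as (0, NULL).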
1603 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1604 Register tmp_reg = rax;
1605 assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1606 "possible collision");
1607 assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1608 "possible collision");
1609
1610 __ block_comment("unpack_array_argument {");
1611
1612 // Pass the length, ptr pair
1613 Label is_null, done;
1614 VMRegPair tmp;
1615 tmp.set_ptr(tmp_reg->as_VMReg());
1616 if (reg.first()->is_stack()) {
1617 // Load the arg up from the stack
1618 move_ptr(masm, reg, tmp);
1619 reg = tmp;
1620 }
1621 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1622 __ jccb(Assembler::equal, is_null);
1623 __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1624 move_ptr(masm, tmp, body_arg);
1625 // load the length relative to the body.
1626 __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1627 arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1628 move32_64(masm, tmp, length_arg);
1629 __ jmpb(done);
1630 __ bind(is_null);
1631 // Pass zeros
1632 __ xorptr(tmp_reg, tmp_reg);
1633 move_ptr(masm, tmp, body_arg);
1634 move32_64(masm, tmp, length_arg);
1635 __ bind(done);
1636
1637 __ block_comment("} unpack_array_argument");
1638 }
1639
1640
1641 // Different signatures may require very different orders for the move
1642 // to avoid clobbering other arguments. There's no simple way to
1643 // order them safely. Compute a safe order for issuing stores and
1644 // break any cycles in those stores. This code is fairly general but
1645 // it's not necessary on the other platforms so we keep it in the
1646 // platform dependent code instead of moving it into a shared file.
1647 // (See bugs 7013347 & 7145024.)
1648 // Note that this code is specific to LP64.
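// A cycle arises when, for instance, incoming arg A sits in the register that outgoing
// arg B must end up in and vice versa; neither store can safely go first, so one value
// is parked in a temporary (tmp_vmreg, rbx, in generate_native_wrapper) and the chain is
// restarted -- see MoveOperation::break_cycle below.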
1649 class ComputeMoveOrder: public StackObj {
1650 class MoveOperation: public ResourceObj {
1651 friend class ComputeMoveOrder;
1652 private:
1653 VMRegPair _src;
1654 VMRegPair _dst;
1655 int _src_index;
1656 int _dst_index;
1657 bool _processed;
1658 MoveOperation* _next;
1659 MoveOperation* _prev;
1660
1661 static int get_id(VMRegPair r) {
1662 return r.first()->value();
1663 }
1664
1665 public:
1666 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1667 _src(src)
1668 , _dst(dst)
1669 , _src_index(src_index)
1670 , _dst_index(dst_index)
1671 , _processed(false)
1672 , _next(NULL)
1673 , _prev(NULL) {
1674 }
1675
1676 VMRegPair src() const { return _src; }
1677 int src_id() const { return get_id(src()); }
1678 int src_index() const { return _src_index; }
1679 VMRegPair dst() const { return _dst; }
1680 void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; }
1681 int dst_index() const { return _dst_index; }
1682 int dst_id() const { return get_id(dst()); }
1683 MoveOperation* next() const { return _next; }
1684 MoveOperation* prev() const { return _prev; }
1685 void set_processed() { _processed = true; }
1686 bool is_processed() const { return _processed; }
1687
1688 // insert
1689 void break_cycle(VMRegPair temp_register) {
1690 // create a new store following the last store
1691 // to move from the temp_register to the original
1692 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1693
1694 // break the cycle of links and insert new_store at the end
1695 // break the reverse link.
1696 MoveOperation* p = prev();
1697 assert(p->next() == this, "must be");
1698 _prev = NULL;
1699 p->_next = new_store;
1700 new_store->_prev = p;
1701
1702 // change the original store to save its value in the temp.
1703 set_dst(-1, temp_register);
1704 }
1705
1706 void link(GrowableArray<MoveOperation*>& killer) {
1707 // link this store in front the store that it depends on
1708 MoveOperation* n = killer.at_grow(src_id(), NULL);
1709 if (n != NULL) {
1710 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1711 _next = n;
1712 n->_prev = this;
1713 }
1714 }
1715 };
1716
1717 private:
1718 GrowableArray<MoveOperation*> edges;
1719
1720 public:
1721 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1722 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1723 // Move operations where the dest is the stack can all be
1724 // scheduled first since they can't interfere with the other moves.
1725 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1726 if (in_sig_bt[i] == T_ARRAY) {
1727 c_arg--;
1728 if (out_regs[c_arg].first()->is_stack() &&
1729 out_regs[c_arg + 1].first()->is_stack()) {
1730 arg_order.push(i);
1731 arg_order.push(c_arg);
1732 } else {
1733 if (out_regs[c_arg].first()->is_stack() ||
1734 in_regs[i].first() == out_regs[c_arg].first()) {
1735 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1736 } else {
1737 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1738 }
1739 }
1740 } else if (in_sig_bt[i] == T_VOID) {
1741 arg_order.push(i);
1742 arg_order.push(c_arg);
1743 } else {
1744 if (out_regs[c_arg].first()->is_stack() ||
1745 in_regs[i].first() == out_regs[c_arg].first()) {
1746 arg_order.push(i);
1747 arg_order.push(c_arg);
1748 } else {
1749 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1750 }
1751 }
1752 }
1753 // Break any cycles in the register moves and emit them in the
1754 // proper order.
1755 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1756 for (int i = 0; i < stores->length(); i++) {
1757 arg_order.push(stores->at(i)->src_index());
1758 arg_order.push(stores->at(i)->dst_index());
1759 }
1760 }
1761
1762 // Collect all the move operations
1763 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1764 if (src.first() == dst.first()) return;
1765 edges.append(new MoveOperation(src_index, src, dst_index, dst));
1766 }
1767
1768 // Walk the edges breaking cycles between moves. The result list
1769 // can be walked in order to produce the proper set of loads
1770 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1771 // Record which moves kill which values
1772 GrowableArray<MoveOperation*> killer;
1773 for (int i = 0; i < edges.length(); i++) {
1774 MoveOperation* s = edges.at(i);
1775 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1776 killer.at_put_grow(s->dst_id(), s, NULL);
1777 }
1778 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1779 "make sure temp isn't in the registers that are killed");
1780
1781 // create links between loads and stores
1782 for (int i = 0; i < edges.length(); i++) {
1783 edges.at(i)->link(killer);
1784 }
1785
1786 // at this point, all the move operations are chained together
1787 // in a doubly linked list. Processing it backwards finds
1788 // the beginning of the chain, forwards finds the end. If there's
1789 // a cycle it can be broken at any point, so pick an edge and walk
1790 // backward until the list ends or we end where we started.
1791 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1792 for (int e = 0; e < edges.length(); e++) {
1793 MoveOperation* s = edges.at(e);
1794 if (!s->is_processed()) {
1795 MoveOperation* start = s;
1796 // search for the beginning of the chain or cycle
1797 while (start->prev() != NULL && start->prev() != s) {
1798 start = start->prev();
1799 }
1800 if (start->prev() == s) {
1801 start->break_cycle(temp_register);
1802 }
1803 // walk the chain forward inserting to store list
1804 while (start != NULL) {
1805 stores->append(start);
1806 start->set_processed();
1807 start = start->next();
1808 }
1809 }
1810 }
1811 return stores;
1812 }
1813 };
1814
1815 static void verify_oop_args(MacroAssembler* masm,
1816 const methodHandle& method,
1817 const BasicType* sig_bt,
1818 const VMRegPair* regs) {
1819 Register temp_reg = rbx; // not part of any compiled calling seq
1820 if (VerifyOops) {
1821 for (int i = 0; i < method->size_of_parameters(); i++) {
1822 if (sig_bt[i] == T_OBJECT ||
1823 sig_bt[i] == T_ARRAY) {
1824 VMReg r = regs[i].first();
1825 assert(r->is_valid(), "bad oop arg");
1826 if (r->is_stack()) {
1827 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1828 __ verify_oop(temp_reg);
1829 } else {
1830 __ verify_oop(r->as_Register());
1831 }
1832 }
1833 }
1834 }
1835 }
1836
1837 static void gen_special_dispatch(MacroAssembler* masm,
1838 const methodHandle& method,
1839 const BasicType* sig_bt,
1840 const VMRegPair* regs) {
1841 verify_oop_args(masm, method, sig_bt, regs);
1842 vmIntrinsics::ID iid = method->intrinsic_id();
1843
1844 // Now write the args into the outgoing interpreter space
1845 bool has_receiver = false;
1846 Register receiver_reg = noreg;
1847 int member_arg_pos = -1;
1848 Register member_reg = noreg;
1849 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1850 if (ref_kind != 0) {
1851 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1852 member_reg = rbx; // known to be free at this point
1853 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1854 } else if (iid == vmIntrinsics::_invokeBasic) {
1855 has_receiver = true;
1856 } else {
1857 fatal("unexpected intrinsic id %d", iid);
1858 }
1859
1860 if (member_reg != noreg) {
1861 // Load the member_arg into register, if necessary.
1862 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1863 VMReg r = regs[member_arg_pos].first();
1864 if (r->is_stack()) {
1865 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1866 } else {
1867 // no data motion is needed
1868 member_reg = r->as_Register();
1869 }
1870 }
1871
1872 if (has_receiver) {
1873 // Make sure the receiver is loaded into a register.
1874 assert(method->size_of_parameters() > 0, "oob");
1875 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1876 VMReg r = regs[0].first();
1877 assert(r->is_valid(), "bad receiver arg");
1878 if (r->is_stack()) {
1879 // Porting note: This assumes that compiled calling conventions always
1880 // pass the receiver oop in a register. If this is not true on some
1881 // platform, pick a temp and load the receiver from stack.
1882 fatal("receiver always in a register");
1883 receiver_reg = j_rarg0; // known to be free at this point
1884 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1885 } else {
1886 // no data motion is needed
1887 receiver_reg = r->as_Register();
1888 }
1889 }
1890
1891 // Figure out which address we are really jumping to:
1892 MethodHandles::generate_method_handle_dispatch(masm, iid,
1893 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1894 }
1895
1896 // ---------------------------------------------------------------------------
1897 // Generate a native wrapper for a given method. The method takes arguments
1898 // in the Java compiled code convention, marshals them to the native
1899 // convention (handlizes oops, etc), transitions to native, makes the call,
1900 // returns to java state (possibly blocking), unhandlizes any result and
1901 // returns.
1902 //
1903 // Critical native functions are a shorthand for the use of
1904 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1905 // functions. The wrapper is expected to unpack the arguments before
1906 // passing them to the callee and perform checks before and after the
1907 // native call to ensure that the GCLocker
1908 // lock_critical/unlock_critical semantics are followed. Some other
1909 // parts of JNI setup are skipped, like the tear down of the JNI handle
1910 // block and the check for pending exceptions, since it's impossible for them
1911 // to be thrown.
1912 //
1913 // They are roughly structured like this:
1914 // if (GCLocker::needs_gc())
1915 // SharedRuntime::block_for_jni_critical();
1916 // transition to thread_in_native
1917 // unpack array arguments and call native entry point
1918 // check for safepoint in progress
1919 // check if any thread suspend flags are set
1920 // call into JVM and possibly unlock the JNI critical
1921 // if a GC was suppressed while in the critical native.
1922 // transition back to thread_in_Java
1923 // return to caller
1924 //
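// ("Handlizing" an oop roughly means spilling it into a known stack slot that is
// described in the OopMap and passing the address of that slot to the native code, so
// the GC can still find and relocate the object while the thread is in native; any
// returned handle is unwrapped again with resolve_jobject after the call.)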
1925 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1926 const methodHandle& method,
1927 int compile_id,
1928 BasicType* in_sig_bt,
1929 VMRegPair* in_regs,
1930 BasicType ret_type,
1931 address critical_entry) {
1932 if (method->is_method_handle_intrinsic()) {
1933 vmIntrinsics::ID iid = method->intrinsic_id();
1934 intptr_t start = (intptr_t)__ pc();
1935 int vep_offset = ((intptr_t)__ pc()) - start;
1936 gen_special_dispatch(masm,
1937 method,
1938 in_sig_bt,
1939 in_regs);
1940 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1941 __ flush();
1942 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1943 return nmethod::new_native_nmethod(method,
1944 compile_id,
1945 masm->code(),
1946 vep_offset,
1947 frame_complete,
1948 stack_slots / VMRegImpl::slots_per_word,
1949 in_ByteSize(-1),
1950 in_ByteSize(-1),
1951 (OopMapSet*)NULL);
1952 }
1953 bool is_critical_native = true;
1954 address native_func = critical_entry;
1955 if (native_func == NULL) {
1956 native_func = method->native_function();
1957 is_critical_native = false;
1958 }
1959 assert(native_func != NULL, "must have function");
1960
1961 // An OopMap for lock (and class if static)
1962 OopMapSet *oop_maps = new OopMapSet();
1963 intptr_t start = (intptr_t)__ pc();
1964
1965 // We have received a description of where all the java arg are located
1966 // on entry to the wrapper. We need to convert these args to where
1967 // the jni function will expect them. To figure out where they go
1968 // we convert the java signature to a C signature by inserting
1969 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1970
1971 const int total_in_args = method->size_of_parameters();
1972 int total_c_args = total_in_args;
1973 if (!is_critical_native) {
1974 total_c_args += 1;
1975 if (method->is_static()) {
1976 total_c_args++;
1977 }
1978 } else {
1979 for (int i = 0; i < total_in_args; i++) {
1980 if (in_sig_bt[i] == T_ARRAY) {
1981 total_c_args++;
1982 }
1983 }
1984 }
1985
1986 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1987 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1988 BasicType* in_elem_bt = NULL;
1989
1990 int argc = 0;
1991 if (!is_critical_native) {
1992 out_sig_bt[argc++] = T_ADDRESS;
1993 if (method->is_static()) {
1994 out_sig_bt[argc++] = T_OBJECT;
1995 }
1996
1997 for (int i = 0; i < total_in_args ; i++ ) {
1998 out_sig_bt[argc++] = in_sig_bt[i];
1999 }
2000 } else {
2001 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2002 SignatureStream ss(method->signature());
2003 for (int i = 0; i < total_in_args ; i++ ) {
2004 if (in_sig_bt[i] == T_ARRAY) {
2005 // Arrays are passed as int, elem* pair
2006 out_sig_bt[argc++] = T_INT;
2007 out_sig_bt[argc++] = T_ADDRESS;
2008 Symbol* atype = ss.as_symbol();
2009 const char* at = atype->as_C_string();
2010 if (strlen(at) == 2) {
2011 assert(at[0] == '[', "must be");
2012 switch (at[1]) {
2013 case 'B': in_elem_bt[i] = T_BYTE; break;
2014 case 'C': in_elem_bt[i] = T_CHAR; break;
2015 case 'D': in_elem_bt[i] = T_DOUBLE; break;
2016 case 'F': in_elem_bt[i] = T_FLOAT; break;
2017 case 'I': in_elem_bt[i] = T_INT; break;
2018 case 'J': in_elem_bt[i] = T_LONG; break;
2019 case 'S': in_elem_bt[i] = T_SHORT; break;
2020 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
2021 default: ShouldNotReachHere();
2022 }
2023 }
2024 } else {
2025 out_sig_bt[argc++] = in_sig_bt[i];
2026 in_elem_bt[i] = T_VOID;
2027 }
2028 if (in_sig_bt[i] != T_VOID) {
2029 assert(in_sig_bt[i] == ss.type(), "must match");
2030 ss.next();
2031 }
2032 }
2033 }
2034
2035 // Now figure out where the args must be stored and how much stack space
2036 // they require.
2037 int out_arg_slots;
2038 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2039
2040 // Compute framesize for the wrapper. We need to handlize all oops in
2041 // incoming registers
2042
2043 // Calculate the total number of stack slots we will need.
2044
2045 // First count the abi requirement plus all of the outgoing args
2046 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2047
2048 // Now the space for the inbound oop handle area
2049 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
2050 if (is_critical_native) {
2051 // Critical natives may have to call out so they need a save area
2052 // for register arguments.
2053 int double_slots = 0;
2054 int single_slots = 0;
2055 for ( int i = 0; i < total_in_args; i++) {
2056 if (in_regs[i].first()->is_Register()) {
2057 const Register reg = in_regs[i].first()->as_Register();
2058 switch (in_sig_bt[i]) {
2059 case T_BOOLEAN:
2060 case T_BYTE:
2061 case T_SHORT:
2062 case T_CHAR:
2063 case T_INT: single_slots++; break;
2064 case T_ARRAY: // specific to LP64 (7145024)
2065 case T_LONG: double_slots++; break;
2066 default: ShouldNotReachHere();
2067 }
2068 } else if (in_regs[i].first()->is_XMMRegister()) {
2069 switch (in_sig_bt[i]) {
2070 case T_FLOAT: single_slots++; break;
2071 case T_DOUBLE: double_slots++; break;
2072 default: ShouldNotReachHere();
2073 }
2074 } else if (in_regs[i].first()->is_FloatRegister()) {
2075 ShouldNotReachHere();
2076 }
2077 }
2078 total_save_slots = double_slots * 2 + single_slots;
2079 // align the save area
2080 if (double_slots != 0) {
2081 stack_slots = align_up(stack_slots, 2);
2082 }
2083 }
2084
2085 int oop_handle_offset = stack_slots;
2086 stack_slots += total_save_slots;
2087
2088 // Now any space we need for handlizing a klass if static method
2089
2090 int klass_slot_offset = 0;
2091 int klass_offset = -1;
2092 int lock_slot_offset = 0;
2093 bool is_static = false;
2094
2095 if (method->is_static()) {
2096 klass_slot_offset = stack_slots;
2097 stack_slots += VMRegImpl::slots_per_word;
2098 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2099 is_static = true;
2100 }
2101
2102 // Plus a lock if needed
2103
2104 if (method->is_synchronized()) {
2105 lock_slot_offset = stack_slots;
2106 stack_slots += VMRegImpl::slots_per_word;
2107 }
2108
2109 // Now a place (+2) to save return values or temp during shuffling
2110 // + 4 for return address (which we own) and saved rbp
2111 stack_slots += 6;
2112
2113 // Ok The space we have allocated will look like:
2114 //
2115 //
2116 // FP-> | |
2117 // |---------------------|
2118 // | 2 slots for moves |
2119 // |---------------------|
2120 // | lock box (if sync) |
2121 // |---------------------| <- lock_slot_offset
2122 // | klass (if static) |
2123 // |---------------------| <- klass_slot_offset
2124 // | oopHandle area |
2125 // |---------------------| <- oop_handle_offset (6 java arg registers)
2126 // | outbound memory |
2127 // | based arguments |
2128 // | |
2129 // |---------------------|
2130 // | |
2131 // SP-> | out_preserved_slots |
2132 //
2133 //
2134
2135
2136 // Now compute actual number of stack words we need rounding to make
2137 // stack properly aligned.
2138 stack_slots = align_up(stack_slots, StackAlignmentInSlots);
2139
2140 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2141
2142 // First thing make an ic check to see if we should even be here
2143
2144 // We are free to use all registers as temps without saving them and
2145 // restoring them except rbp. rbp is the only callee save register
2146 // as far as the interpreter and the compiler(s) are concerned.
2147
2148
2149 const Register ic_reg = rax;
2150 const Register receiver = j_rarg0;
2151
2152 Label hit;
2153 Label exception_pending;
2154
2155 assert_different_registers(ic_reg, receiver, rscratch1);
2156 __ verify_oop(receiver);
2157 __ load_klass(rscratch1, receiver);
2158 __ cmpq(ic_reg, rscratch1);
2159 __ jcc(Assembler::equal, hit);
2160
2161 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2162
2163 // Verified entry point must be aligned
2164 __ align(8);
2165
2166 __ bind(hit);
2167
2168 int vep_offset = ((intptr_t)__ pc()) - start;
2169
2170 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
2171 Label L_skip_barrier;
2172 Register klass = r10;
2173 __ mov_metadata(klass, method->method_holder()); // InstanceKlass*
2174 __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
2175
2176 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
2177
2178 __ bind(L_skip_barrier);
2179 }
2180
2181 #ifdef COMPILER1
2182 // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
2183 if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2184 inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
2185 }
2186 #endif // COMPILER1
2187
2188 // The instruction at the verified entry point must be 5 bytes or longer
2189 // because it can be patched on the fly by make_non_entrant. The stack bang
2190 // instruction fits that requirement.
2191
2192 // Generate stack overflow check
2193
2194 if (UseStackBanging) {
2195 __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
2196 } else {
2197 // need a 5 byte instruction to allow MT safe patching to non-entrant
2198 __ fat_nop();
2199 }
2200
2201 // Generate a new frame for the wrapper.
2202 __ enter();
2203 // -2 because return address is already present and so is saved rbp
2204 __ subptr(rsp, stack_size - 2*wordSize);
2205
2206 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
2207 bs->nmethod_entry_barrier(masm);
2208
2209 // Frame is now completed as far as size and linkage.
2210 int frame_complete = ((intptr_t)__ pc()) - start;
2211
2212 if (UseRTMLocking) {
2213 // Abort RTM transaction before calling JNI
2214 // because critical section will be large and will be
2215 // aborted anyway. Also nmethod could be deoptimized.
2216 __ xabort(0);
2217 }
2218
2219 #ifdef ASSERT
2220 {
2221 Label L;
2222 __ mov(rax, rsp);
2223 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2224 __ cmpptr(rax, rsp);
2225 __ jcc(Assembler::equal, L);
2226 __ stop("improperly aligned stack");
2227 __ bind(L);
2228 }
2229 #endif /* ASSERT */
2230
2231
2232 // We use r14 as the oop handle for the receiver/klass
2233 // It is callee save so it survives the call to native
2234
2235 const Register oop_handle_reg = r14;
2236
2237 if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
2238 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2239 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2240 }
2241
2242 //
2243 // We immediately shuffle the arguments so that any vm call we have to
2244 // make from here on out (sync slow path, jvmti, etc.) we will have
2245 // captured the oops from our caller and have a valid oopMap for
2246 // them.
2247
2248 // -----------------
2249 // The Grand Shuffle
2250
2251 // The Java calling convention is either equal (linux) or denser (win64) than the
2252 // c calling convention. However, because of the jni_env argument the c calling
2253 // convention always has at least one more (and two for static) arguments than Java.
2254 // Therefore if we move the args from java -> c backwards then we will never have
2255 // a register->register conflict and we don't have to build a dependency graph
2256 // and figure out how to break any cycles.
2257 //
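// Concretely: for a regular JNI call java arg i simply becomes c arg i+1 (or i+2 for a
// static method), so the plain backwards walk set up below suffices. Critical natives
// are the exception -- their array arguments expand into (length, pointer) pairs --
// which is why that case uses ComputeMoveOrder above instead (see the arg_order setup
// just below).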
2258
2259 // Record esp-based slot for receiver on stack for non-static methods
2260 int receiver_offset = -1;
2261
2262 // This is a trick. We double the stack slots so we can claim
2263 // the oops in the caller's frame. Since we are sure to have
2264 // more args than the caller, doubling is enough to make
2265 // sure we can capture all the incoming oop args from the
2266 // caller.
2267 //
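// (The doubled size matters because incoming stack oops live in the caller's frame,
// above ours: they are recorded as slot stack_slots + offset_in_older_frame, exactly as
// save_or_restore_arguments does for critical natives.)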
2268 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2269
2270 // Mark location of rbp (someday)
2271 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2272
2273 // Use eax, ebx as temporaries during any memory-memory moves we have to do
2274 // All inbound args are referenced based on rbp and all outbound args via rsp.
2275
2276
2277 #ifdef ASSERT
2278 bool reg_destroyed[RegisterImpl::number_of_registers];
2279 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2280 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2281 reg_destroyed[r] = false;
2282 }
2283 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2284 freg_destroyed[f] = false;
2285 }
2286
2287 #endif /* ASSERT */
2288
2289 // This may iterate in two different directions depending on the
2290 // kind of native it is. The reason is that for regular JNI natives
2291 // the incoming and outgoing registers are offset upwards and for
2292 // critical natives they are offset down.
2293 GrowableArray<int> arg_order(2 * total_in_args);
2294 // Inbound arguments that need to be pinned for critical natives
2295 GrowableArray<int> pinned_args(total_in_args);
2296 // Current stack slot for storing register based array argument
2297 int pinned_slot = oop_handle_offset;
2298
2299 VMRegPair tmp_vmreg;
2300 tmp_vmreg.set2(rbx->as_VMReg());
2301
2302 if (!is_critical_native) {
2303 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2304 arg_order.push(i);
2305 arg_order.push(c_arg);
2306 }
2307 } else {
2308 // Compute a valid move order, using tmp_vmreg to break any cycles
2309 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2310 }
2311
2312 int temploc = -1;
2313 for (int ai = 0; ai < arg_order.length(); ai += 2) {
2314 int i = arg_order.at(ai);
2315 int c_arg = arg_order.at(ai + 1);
2316 __ block_comment(err_msg("move %d -> %d", i, c_arg));
2317 if (c_arg == -1) {
2318 assert(is_critical_native, "should only be required for critical natives");
2319 // This arg needs to be moved to a temporary
2320 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2321 in_regs[i] = tmp_vmreg;
2322 temploc = i;
2323 continue;
2324 } else if (i == -1) {
2325 assert(is_critical_native, "should only be required for critical natives");
2326 // Read from the temporary location
2327 assert(temploc != -1, "must be valid");
2328 i = temploc;
2329 temploc = -1;
2330 }
2331 #ifdef ASSERT
2332 if (in_regs[i].first()->is_Register()) {
2333 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2334 } else if (in_regs[i].first()->is_XMMRegister()) {
2335 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2336 }
2337 if (out_regs[c_arg].first()->is_Register()) {
2338 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2339 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2340 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2341 }
2342 #endif /* ASSERT */
2343 switch (in_sig_bt[i]) {
2344 case T_ARRAY:
2345 if (is_critical_native) {
2346 // pin before unpack
2347 if (Universe::heap()->supports_object_pinning()) {
2348 save_args(masm, total_c_args, 0, out_regs);
2349 gen_pin_object(masm, in_regs[i]);
2350 pinned_args.append(i);
2351 restore_args(masm, total_c_args, 0, out_regs);
2352
2353 // rax has pinned array
2354 VMRegPair result_reg;
2355 result_reg.set_ptr(rax->as_VMReg());
2356 move_ptr(masm, result_reg, in_regs[i]);
2357 if (!in_regs[i].first()->is_stack()) {
2358 assert(pinned_slot <= stack_slots, "overflow");
2359 move_ptr(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
2360 pinned_slot += VMRegImpl::slots_per_word;
2361 }
2362 }
2363 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2364 c_arg++;
2365 #ifdef ASSERT
2366 if (out_regs[c_arg].first()->is_Register()) {
2367 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2368 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2369 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2370 }
2371 #endif
2372 break;
2373 }
2374 case T_OBJECT:
2375 assert(!is_critical_native, "no oop arguments");
2376 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2377 ((i == 0) && (!is_static)),
2378 &receiver_offset);
2379 break;
2380 case T_VOID:
2381 break;
2382
2383 case T_FLOAT:
2384 float_move(masm, in_regs[i], out_regs[c_arg]);
2385 break;
2386
2387 case T_DOUBLE:
2388 assert( i + 1 < total_in_args &&
2389 in_sig_bt[i + 1] == T_VOID &&
2390 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2391 double_move(masm, in_regs[i], out_regs[c_arg]);
2392 break;
2393
2394 case T_LONG :
2395 long_move(masm, in_regs[i], out_regs[c_arg]);
2396 break;
2397
2398 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2399
2400 default:
2401 move32_64(masm, in_regs[i], out_regs[c_arg]);
2402 }
2403 }
2404
2405 int c_arg;
2406
2407 // Pre-load a static method's oop into r14. Used both by locking code and
2408 // the normal JNI call code.
2409 if (!is_critical_native) {
2410 // point c_arg at the first arg that is already loaded in case we
2411 // need to spill before we call out
2412 c_arg = total_c_args - total_in_args;
2413
2414 if (method->is_static()) {
2415
2416 // load oop into a register
2417 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2418
2419 // Now handlize the static class mirror; it's known not-null.
2420 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2421 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2422
2423 // Now get the handle
2424 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2425 // store the klass handle as second argument
2426 __ movptr(c_rarg1, oop_handle_reg);
2427 // and protect the arg if we must spill
2428 c_arg--;
2429 }
2430 } else {
2431 // For JNI critical methods we need to save all registers in save_args.
2432 c_arg = 0;
2433 }
2434
2435 // Change state to native (we save the return address in the thread, since it might not
2436 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2437 // points into the right code segment. It does not have to be the correct return pc.
2438 // We use the same pc/oopMap repeatedly when we call out
2439
2440 intptr_t the_pc = (intptr_t) __ pc();
2441 oop_maps->add_gc_map(the_pc - start, map);
2442
2443 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2444
2445
2446 // We have all of the arguments setup at this point. We must not touch any register
2447 // argument registers at this point (what if we save/restore them there are no oop?
2448
2449 {
2450 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2451 // protect the args we've loaded
2452 save_args(masm, total_c_args, c_arg, out_regs);
2453 __ mov_metadata(c_rarg1, method());
2454 __ call_VM_leaf(
2455 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2456 r15_thread, c_rarg1);
2457 restore_args(masm, total_c_args, c_arg, out_regs);
2458 }
2459
2460 // RedefineClasses() tracing support for obsolete method entry
2461 if (log_is_enabled(Trace, redefine, class, obsolete)) {
2462 // protect the args we've loaded
2463 save_args(masm, total_c_args, c_arg, out_regs);
2464 __ mov_metadata(c_rarg1, method());
2465 __ call_VM_leaf(
2466 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2467 r15_thread, c_rarg1);
2468 restore_args(masm, total_c_args, c_arg, out_regs);
2469 }
2470
2471 // Lock a synchronized method
2472
2473 // Register definitions used by locking and unlocking
2474
2475 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2476 const Register obj_reg = rbx; // Will contain the oop
2477 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2478 const Register old_hdr = r13; // value of old header at unlock time
2479
2480 Label slow_path_lock;
2481 Label lock_done;
2482
2483 if (method->is_synchronized()) {
2484 assert(!is_critical_native, "unhandled");
2485
2486
2487 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2488
2489 // Get the handle (the 2nd argument)
2490 __ mov(oop_handle_reg, c_rarg1);
2491
2492 // Get address of the box
2493
2494 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2495
2496 // Load the oop from the handle
2497 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2498
2499 __ resolve(IS_NOT_NULL, obj_reg);
2500 if (UseBiasedLocking) {
2501 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2502 }
2503
2504 // Load immediate 1 into swap_reg %rax
2505 __ movl(swap_reg, 1);
2506
2507 // Load (object->mark() | 1) into swap_reg %rax
2508 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2509
2510 // Save (object->mark() | 1) into BasicLock's displaced header
2511 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2512
2513 // src -> dest iff dest == rax else rax <- dest
2514 __ lock();
2515 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2516 __ jcc(Assembler::equal, lock_done);
2517
2518 // Hmm should this move to the slow path code area???
2519
2520 // Test if the oopMark is an obvious stack pointer, i.e.,
2521 // 1) (mark & 3) == 0, and
2522 // 2) rsp <= mark < rsp + os::pagesize()
2523 // These 3 tests can be done by evaluating the following
2524 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2525 // assuming both stack pointer and pagesize have their
2526 // least significant 2 bits clear.
2527 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
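// (Worked example, assuming a 4K page: 3 - os::vm_page_size() == -4093 == 0x...f003, a
// mask with bits 0-1 set, bits 2-11 clear, and every higher bit set. The and-result is
// zero exactly when 0 <= mark - rsp < os::vm_page_size() and the low two bits of the
// mark are clear, i.e. the mark points into this thread's stack just above rsp -- the
// recursive stack-lock case -- so the zero stored into the displaced header below marks
// the lock as recursive.)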
2528
2529 __ subptr(swap_reg, rsp);
2530 __ andptr(swap_reg, 3 - os::vm_page_size());
2531
2532 // Save the test result, for recursive case, the result is zero
2533 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2534 __ jcc(Assembler::notEqual, slow_path_lock);
2535
2536 // Slow path will re-enter here
2537
2538 __ bind(lock_done);
2539 }
2540
2541
2542 // Finally just about ready to make the JNI call
2543
2544
2545 // get JNIEnv* which is first argument to native
2546 if (!is_critical_native) {
2547 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2548 }
2549
2550 // Now set thread in native
2551 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2552
2553 __ call(RuntimeAddress(native_func));
2554
2555 // Verify or restore cpu control state after JNI call
2556 __ restore_cpu_control_state_after_jni();
2557
2558 // Unpack native results.
2559 switch (ret_type) {
2560 case T_BOOLEAN: __ c2bool(rax); break;
2561 case T_CHAR : __ movzwl(rax, rax); break;
2562 case T_BYTE : __ sign_extend_byte (rax); break;
2563 case T_SHORT : __ sign_extend_short(rax); break;
2564 case T_INT : /* nothing to do */ break;
2565 case T_DOUBLE :
2566 case T_FLOAT :
2567 // Result is in xmm0 we'll save as needed
2568 break;
2569 case T_ARRAY: // Really a handle
2570 case T_OBJECT: // Really a handle
2571 break; // can't de-handlize until after safepoint check
2572 case T_VOID: break;
2573 case T_LONG: break;
2574 default : ShouldNotReachHere();
2575 }
2576
2577 // unpin pinned arguments
2578 pinned_slot = oop_handle_offset;
2579 if (pinned_args.length() > 0) {
2580 // save return value that may be overwritten otherwise.
2581 save_native_result(masm, ret_type, stack_slots);
2582 for (int index = 0; index < pinned_args.length(); index ++) {
2583 int i = pinned_args.at(index);
2584 assert(pinned_slot <= stack_slots, "overflow");
2585 if (!in_regs[i].first()->is_stack()) {
2586 int offset = pinned_slot * VMRegImpl::stack_slot_size;
2587 __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
2588 pinned_slot += VMRegImpl::slots_per_word;
2589 }
2590 gen_unpin_object(masm, in_regs[i]);
2591 }
2592 restore_native_result(masm, ret_type, stack_slots);
2593 }
2594
2595 // Switch thread to "native transition" state before reading the synchronization state.
2596 // This additional state is necessary because reading and testing the synchronization
2597 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2598 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2599 // VM thread changes sync state to synchronizing and suspends threads for GC.
2600 // Thread A is resumed to finish this native method, but doesn't block here since it
2601 // didn't see any synchronization in progress, and escapes.
2602 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2603
2604 // Force this write out before the read below
2605 __ membar(Assembler::Membar_mask_bits(
2606 Assembler::LoadLoad | Assembler::LoadStore |
2607 Assembler::StoreLoad | Assembler::StoreStore));
2608
2609 Label after_transition;
2610
2611 // check for safepoint operation in progress and/or pending suspend requests
2612 {
2613 Label Continue;
2614 Label slow_path;
2615
2616 __ safepoint_poll(slow_path, r15_thread, rscratch1);
2617
2618 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2619 __ jcc(Assembler::equal, Continue);
2620 __ bind(slow_path);
2621
2622 // Don't use call_VM as it will see a possible pending exception and forward it
2623 // and never return here preventing us from clearing _last_native_pc down below.
2624 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2625 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2626 // by hand.
2627 //
2628 __ vzeroupper();
2629 save_native_result(masm, ret_type, stack_slots);
2630 __ mov(c_rarg0, r15_thread);
2631 __ mov(r12, rsp); // remember sp
2632 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2633 __ andptr(rsp, -16); // align stack as required by ABI
2634 if (!is_critical_native) {
2635 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2636 } else {
2637 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2638 }
2639 __ mov(rsp, r12); // restore sp
2640 __ reinit_heapbase();
2641 // Restore any method result value
2642 restore_native_result(masm, ret_type, stack_slots);
2643
2644 if (is_critical_native) {
2645 // The call above performed the transition to thread_in_Java so
2646 // skip the transition logic below.
2647 __ jmpb(after_transition);
2648 }
2649
2650 __ bind(Continue);
2651 }
2652
2653 // change thread state
2654 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2655 __ bind(after_transition);
2656
2657 Label reguard;
2658 Label reguard_done;
2659 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2660 __ jcc(Assembler::equal, reguard);
2661 __ bind(reguard_done);
2662
2663 // native result if any is live
2664
2665 // Unlock
2666 Label unlock_done;
2667 Label slow_path_unlock;
2668 if (method->is_synchronized()) {
2669
2670 // Get locked oop from the handle we passed to jni
2671 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2672 __ resolve(IS_NOT_NULL, obj_reg);
2673
2674 Label done;
2675
2676 if (UseBiasedLocking) {
2677 __ biased_locking_exit(obj_reg, old_hdr, done);
2678 }
2679
2680 // Simple recursive lock?
2681
2682 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2683 __ jcc(Assembler::equal, done);
2684
2685 // Must save rax if it is live now because cmpxchg must use it
2686 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2687 save_native_result(masm, ret_type, stack_slots);
2688 }
2689
2690
2691 // get address of the stack lock
2692 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2693 // get old displaced header
2694 __ movptr(old_hdr, Address(rax, 0));
2695
2696 // Atomic swap old header if oop still contains the stack lock
2697 __ lock();
2698 __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2699 __ jcc(Assembler::notEqual, slow_path_unlock);
2700
2701 // slow path re-enters here
2702 __ bind(unlock_done);
2703 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2704 restore_native_result(masm, ret_type, stack_slots);
2705 }
2706
2707 __ bind(done);
2708
2709 }
2710 {
2711 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2712 save_native_result(masm, ret_type, stack_slots);
2713 __ mov_metadata(c_rarg1, method());
2714 __ call_VM_leaf(
2715 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2716 r15_thread, c_rarg1);
2717 restore_native_result(masm, ret_type, stack_slots);
2718 }
2719
2720 __ reset_last_Java_frame(false);
2721
2722 // Unbox oop result, e.g. JNIHandles::resolve value.
2723 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2724 __ resolve_jobject(rax /* value */,
2725 r15_thread /* thread */,
2726 rcx /* tmp */);
2727 }
2728
2729 if (CheckJNICalls) {
2730 // clear_pending_jni_exception_check
2731 __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
2732 }
2733
2734 if (!is_critical_native) {
2735 // reset handle block
2736 __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2737 __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2738 }
2739
2740 // pop our frame
2741
2742 __ leave();
2743
2744 if (!is_critical_native) {
2745 // Any exception pending?
2746 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2747 __ jcc(Assembler::notEqual, exception_pending);
2748 }
2749
2750 // Return
2751
2752 __ ret(0);
2753
2754 // Unexpected paths are out of line and go here
2755
2756 if (!is_critical_native) {
2757 // forward the exception
2758 __ bind(exception_pending);
2759
2760 // and forward the exception
2761 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2762 }
2763
2764 // Slow path locking & unlocking
2765 if (method->is_synchronized()) {
2766
2767 // BEGIN Slow path lock
2768 __ bind(slow_path_lock);
2769
2770 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2771 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2772
2773 // protect the args we've loaded
2774 save_args(masm, total_c_args, c_arg, out_regs);
2775
2776 __ mov(c_rarg0, obj_reg);
2777 __ mov(c_rarg1, lock_reg);
2778 __ mov(c_rarg2, r15_thread);
2779
2780 // Not a leaf but we have last_Java_frame setup as we want
2781 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2782 restore_args(masm, total_c_args, c_arg, out_regs);
2783
2784 #ifdef ASSERT
2785 { Label L;
2786 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2787 __ jcc(Assembler::equal, L);
2788 __ stop("no pending exception allowed on exit from monitorenter");
2789 __ bind(L);
2790 }
2791 #endif
2792 __ jmp(lock_done);
2793
2794 // END Slow path lock
2795
2796 // BEGIN Slow path unlock
2797 __ bind(slow_path_unlock);
2798
2799 // If we haven't already saved the native result we must save it now as xmm registers
2800 // are still exposed.
2801 __ vzeroupper();
2802 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2803 save_native_result(masm, ret_type, stack_slots);
2804 }
2805
2806 __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2807
2808 __ mov(c_rarg0, obj_reg);
2809 __ mov(c_rarg2, r15_thread);
2810 __ mov(r12, rsp); // remember sp
2811 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2812 __ andptr(rsp, -16); // align stack as required by ABI
2813
2814 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2815 // NOTE that obj_reg == rbx currently
2816 __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2817 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2818
2819 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2820 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2821 __ mov(rsp, r12); // restore sp
2822 __ reinit_heapbase();
2823 #ifdef ASSERT
2824 {
2825 Label L;
2826 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2827 __ jcc(Assembler::equal, L);
2828 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2829 __ bind(L);
2830 }
2831 #endif /* ASSERT */
2832
2833 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2834
2835 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2836 restore_native_result(masm, ret_type, stack_slots);
2837 }
2838 __ jmp(unlock_done);
2839
2840 // END Slow path unlock
2841
2842 } // synchronized
2843
2844 // SLOW PATH Reguard the stack if needed
2845
2846 __ bind(reguard);
2847 __ vzeroupper();
2848 save_native_result(masm, ret_type, stack_slots);
2849 __ mov(r12, rsp); // remember sp
2850 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2851 __ andptr(rsp, -16); // align stack as required by ABI
2852 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2853 __ mov(rsp, r12); // restore sp
2854 __ reinit_heapbase();
2855 restore_native_result(masm, ret_type, stack_slots);
2856 // and continue
2857 __ jmp(reguard_done);
2858
2859
2860
2861 __ flush();
2862
2863 nmethod *nm = nmethod::new_native_nmethod(method,
2864 compile_id,
2865 masm->code(),
2866 vep_offset,
2867 frame_complete,
2868 stack_slots / VMRegImpl::slots_per_word,
2869 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2870 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2871 oop_maps);
2872
2873 if (is_critical_native) {
2874 nm->set_lazy_critical_native(true);
2875 }
2876
2877 return nm;
2878
2879 }
2880
2881 // this function returns the adjust size (in number of words) to a c2i adapter
2882 // activation for use during deoptimization
2883 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2884 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2885 }
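// For example, deoptimizing into a callee with 2 parameters and 5 locals grows the
// activation by (5 - 2) * Interpreter::stackElementWords words relative to the bare
// c2i activation.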
2886
2887
2888 uint SharedRuntime::out_preserve_stack_slots() {
2889 return 0;
2890 }
2891
2892 //------------------------------generate_deopt_blob----------------------------
2893 void SharedRuntime::generate_deopt_blob() {
2894 // Allocate space for the code
2895 ResourceMark rm;
2896 // Setup code generation tools
2897 int pad = 0;
2898 #if INCLUDE_JVMCI
2899 if (EnableJVMCI || UseAOT) {
2900 pad += 512; // Increase the buffer size when compiling for JVMCI
2901 }
2902 #endif
2903 CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2904 MacroAssembler* masm = new MacroAssembler(&buffer);
2905 int frame_size_in_words;
2906 OopMap* map = NULL;
2907 OopMapSet *oop_maps = new OopMapSet();
2908
2909 // -------------
2910 // This code enters when returning to a de-optimized nmethod. A return
2911 // address has been pushed on the stack, and return values are in
2912 // registers.
2913 // If we are doing a normal deopt then we were called from the patched
2914 // nmethod from the point we returned to the nmethod. So the return
2915 // address on the stack is wrong by NativeCall::instruction_size
2916 // We will adjust the value so it looks like we have the original return
2917 // address on the stack (like when we eagerly deoptimized).
2918 // In the case of an exception pending when deoptimizing, we enter
2919 // with a return address on the stack that points after the call we patched
2920 // into the exception handler. We have the following register state from,
2921 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2922 // rax: exception oop
2923 // rbx: exception handler
2924 // rdx: throwing pc
2925 // So in this case we simply jam rdx into the useless return address and
2926 // the stack looks just like we want.
2927 //
2928 // At this point we need to de-opt. We save the argument return
2929 // registers. We call the first C routine, fetch_unroll_info(). This
2930 // routine captures the return values and returns a structure which
2931 // describes the current frame size and the sizes of all replacement frames.
2932 // The current frame is compiled code and may contain many inlined
2933 // functions, each with their own JVM state. We pop the current frame, then
2934 // push all the new frames. Then we call the C routine unpack_frames() to
2935 // populate these frames. Finally unpack_frames() returns us the new target
2936 // address. Notice that callee-save registers are BLOWN here; they have
2937 // already been captured in the vframeArray at the time the return PC was
2938 // patched.
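// Added outline of the blob below, for orientation only (the generated code is
// authoritative):
//   1. save_live_registers()                        -- capture the deoptee's state
//   2. info = fetch_unroll_info(thread, exec_mode)  -- sizes/pcs of the replacement frames
//   3. restore the result registers, pop the deoptimized frame
//   4. push a skeletal interpreter frame for each replacement frame
//   5. re-push this self-frame and call unpack_frames(thread, exec_mode)
//   6. pop the self-frame and return into the topmost new frame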
2939 address start = __ pc();
2940 Label cont;
2941
2942 // Prolog for non exception case!
2943
2944 // Save everything in sight.
2945 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2946
2947 // Normal deoptimization. Save exec mode for unpack_frames.
2948 __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
2949 __ jmp(cont);
2950
2951 int reexecute_offset = __ pc() - start;
2952 #if INCLUDE_JVMCI && !defined(COMPILER1)
2953 if (EnableJVMCI && UseJVMCICompiler) {
2954 // JVMCI does not use this kind of deoptimization
2955 __ should_not_reach_here();
2956 }
2957 #endif
2958
2959 // Reexecute case
2960 // return address is the pc that describes what bci to re-execute at
2961
2962 // No need to update map as each call to save_live_registers will produce identical oopmap
2963 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2964
2965 __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
2966 __ jmp(cont);
2967
2968 #if INCLUDE_JVMCI
2969 Label after_fetch_unroll_info_call;
2970 int implicit_exception_uncommon_trap_offset = 0;
2971 int uncommon_trap_offset = 0;
2972
2973 if (EnableJVMCI || UseAOT) {
2974 implicit_exception_uncommon_trap_offset = __ pc() - start;
2975
2976 __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2977 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD);
2978
2979 uncommon_trap_offset = __ pc() - start;
2980
2981 // Save everything in sight.
2982 RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2983 // fetch_unroll_info needs to call last_java_frame()
2984 __ set_last_Java_frame(noreg, noreg, NULL);
2985
2986 __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
2987 __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
2988
2989 __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
2990 __ mov(c_rarg0, r15_thread);
2991 __ movl(c_rarg2, r14); // exec mode
2992 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2993 oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2994
2995 __ reset_last_Java_frame(false);
2996
2997 __ jmp(after_fetch_unroll_info_call);
2998 } // EnableJVMCI
2999 #endif // INCLUDE_JVMCI
3000
3001 int exception_offset = __ pc() - start;
3002
3003 // Prolog for exception case
3004
3005 // all registers are dead at this entry point, except for rax and
3006 // rdx, which contain the exception oop and exception pc
3007 // respectively. Set them in TLS and fall thru to the
3008 // unpack_with_exception_in_tls entry point.
3009
3010 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3011 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3012
3013 int exception_in_tls_offset = __ pc() - start;
3014
3015 // new implementation because exception oop is now passed in JavaThread
3016
3017 // Prolog for exception case
3018 // All registers must be preserved because they might be used by LinearScan
3019 // Exception oop and throwing PC are passed in JavaThread
3020 // tos: stack at point of call to method that threw the exception (i.e. only
3021 // args are on the stack, no return address)
3022
3023 // make room on stack for the return address
3024 // It will be patched later with the throwing pc. The correct value is not
3025 // available now because loading it from memory would destroy registers.
3026 __ push(0);
3027
3028 // Save everything in sight.
3029 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3030
3031 // Now it is safe to overwrite any register
3032
3033 // Deopt during an exception. Save exec mode for unpack_frames.
3034 __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
3035
3036 // load throwing pc from JavaThread and patch it as the return address
3037 // of the current frame. Then clear the field in JavaThread
3038
3039 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3040 __ movptr(Address(rbp, wordSize), rdx);
3041 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3042
3043 #ifdef ASSERT
3044 // verify that there is really an exception oop in JavaThread
3045 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3046 __ verify_oop(rax);
3047
3048 // verify that there is no pending exception
3049 Label no_pending_exception;
3050 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3051 __ testptr(rax, rax);
3052 __ jcc(Assembler::zero, no_pending_exception);
3053 __ stop("must not have pending exception here");
3054 __ bind(no_pending_exception);
3055 #endif
3056
3057 __ bind(cont);
3058
3059 // Call C code. Need thread and this frame, but NOT official VM entry
3060 // crud. We cannot block on this call, no GC can happen.
3061 //
3062 // UnrollBlock* fetch_unroll_info(JavaThread* thread)
3063
3064 // fetch_unroll_info needs to call last_java_frame().
3065
3066 __ set_last_Java_frame(noreg, noreg, NULL);
3067 #ifdef ASSERT
3068 { Label L;
3069 __ cmpptr(Address(r15_thread,
3070 JavaThread::last_Java_fp_offset()),
3071 (int32_t)0);
3072 __ jcc(Assembler::equal, L);
3073 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3074 __ bind(L);
3075 }
3076 #endif // ASSERT
3077 __ mov(c_rarg0, r15_thread);
3078 __ movl(c_rarg1, r14); // exec_mode
3079 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3080
3081 // Need to have an oopmap that tells fetch_unroll_info where to
3082 // find any register it might need.
3083 oop_maps->add_gc_map(__ pc() - start, map);
3084
3085 __ reset_last_Java_frame(false);
3086
3087 #if INCLUDE_JVMCI
3088 if (EnableJVMCI || UseAOT) {
3089 __ bind(after_fetch_unroll_info_call);
3090 }
3091 #endif
3092
3093 // Load UnrollBlock* into rdi
3094 __ mov(rdi, rax);
3095
3096 __ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
3097 Label noException;
3098 __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
3099 __ jcc(Assembler::notEqual, noException);
3100 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3101 // QQQ this is useless; it was NULL above
3102 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3103 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3104 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3105
3106 __ verify_oop(rax);
3107
3108 // Overwrite the result registers with the exception results.
3109 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3110 // I think this is useless
3111 __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3112
3113 __ bind(noException);
3114
3115 // Only register save data is on the stack.
3116 // Now restore the result registers. Everything else is either dead
3117 // or captured in the vframeArray.
3118 RegisterSaver::restore_result_registers(masm);
3119
3120 // All of the register save area has been popped off the stack. Only the
3121 // return address remains.
3122
3123 // Pop all the frames we must move/replace.
3124 //
3125 // Frame picture (youngest to oldest)
3126 // 1: self-frame (no frame link)
3127 // 2: deopting frame (no frame link)
3128 // 3: caller of deopting frame (could be compiled/interpreted).
3129 //
3130 // Note: by leaving the return address of self-frame on the stack
3131 // and using the size of frame 2 to adjust the stack
3132 // when we are done the return to frame 3 will still be on the stack.
3133
3134 // Pop deoptimized frame
3135 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3136 __ addptr(rsp, rcx);
3137
3138 // rsp should be pointing at the return address to the caller (3)
3139
3140 // Pick up the initial fp we should save
3141 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3142 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3143
3144 #ifdef ASSERT
3145 // Compilers generate code that bangs the stack by as much as the
3146 // interpreter would need. So this stack banging should never
3147 // trigger a fault. Verify that it does not on non-product builds.
3148 if (UseStackBanging) {
3149 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3150 __ bang_stack_size(rbx, rcx);
3151 }
3152 #endif
3153
3154 // Load address of array of frame pcs into rcx
3155 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3156
3157 // Trash the old pc
3158 __ addptr(rsp, wordSize);
3159
3160 // Load address of array of frame sizes into rsi
3161 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3162
3163 // Load counter into rdx
3164 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3165
3166 // Now adjust the caller's stack to make up for the extra locals
3167 // but record the original sp so that we can save it in the skeletal interpreter
3168 // frame and the stack walking of interpreter_sender will get the unextended sp
3169 // value and not the "real" sp value.
3170
3171 const Register sender_sp = r8;
3172
3173 __ mov(sender_sp, rsp);
3174 __ movl(rbx, Address(rdi,
3175 Deoptimization::UnrollBlock::
3176 caller_adjustment_offset_in_bytes()));
3177 __ subptr(rsp, rbx);
3178
3179 // Push interpreter frames in a loop
3180 Label loop;
3181 __ bind(loop);
3182 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3183 __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand
3184 __ pushptr(Address(rcx, 0)); // Save return address
3185 __ enter(); // Save old & set new ebp
3186 __ subptr(rsp, rbx); // Prolog
3187 // This value is corrected by layout_activation_impl
3188 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3189 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3190 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3191 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3192 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3193 __ decrementl(rdx); // Decrement counter
3194 __ jcc(Assembler::notZero, loop);
3195 __ pushptr(Address(rcx, 0)); // Save final return address
3196
3197 // Re-push self-frame
3198 __ enter(); // Save old & set new ebp
3199
3200 // Allocate a full sized register save area.
3201 // Return address and rbp are in place, so we allocate two less words.
3202 __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3203
3204 // Restore frame locals after moving the frame
3205 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3206 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3207
3208 // Call C code. Need thread but NOT official VM entry
3209 // crud. We cannot block on this call, no GC can happen. Call should
3210 // restore return values to their stack-slots with the new SP.
3211 //
3212 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3213
3214 // Use rbp because the frames look interpreted now
3215 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3216 // Don't need the precise return PC here, just precise enough to point into this code blob.
3217 address the_pc = __ pc();
3218 __ set_last_Java_frame(noreg, rbp, the_pc);
3219
3220 __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI
3221 __ mov(c_rarg0, r15_thread);
3222 __ movl(c_rarg1, r14); // second arg: exec_mode
3223 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3224 // Revert SP alignment after call since we're going to do some SP relative addressing below
3225 __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3226
3227 // Set an oopmap for the call site
3228 // Use the same PC we used for the last java frame
3229 oop_maps->add_gc_map(the_pc - start,
3230 new OopMap( frame_size_in_words, 0 ));
3231
3232 // Clear fp AND pc
3233 __ reset_last_Java_frame(true);
3234
3235 // Collect return values
3236 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3237 __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3238 // I think this is useless (throwing pc?)
3239 __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3240
3241 // Pop self-frame.
3242 __ leave(); // Epilog
3243
3244 // Jump to interpreter
3245 __ ret(0);
3246
3247 // Make sure all code is generated
3248 masm->flush();
3249
3250 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3251 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3252 #if INCLUDE_JVMCI
3253 if (EnableJVMCI || UseAOT) {
3254 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
3255 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
3256 }
3257 #endif
3258 }
3259
3260 #ifdef COMPILER2
3261 //------------------------------generate_uncommon_trap_blob--------------------
3262 void SharedRuntime::generate_uncommon_trap_blob() {
3263 // Allocate space for the code
3264 ResourceMark rm;
3265 // Setup code generation tools
3266 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3267 MacroAssembler* masm = new MacroAssembler(&buffer);
3268
3269 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3270
3271 address start = __ pc();
3272
3273 if (UseRTMLocking) {
3274 // Abort RTM transaction before possible nmethod deoptimization.
3275 __ xabort(0);
3276 }
3277
3278 // Push self-frame. We get here with a return address on the
3279 // stack, so rsp is 8-byte aligned until we allocate our frame.
3280 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!
3281
3282 // No callee saved registers. rbp is assumed implicitly saved
3283 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3284
3285 // compiler left unloaded_class_index in j_rarg0; move it to where the
3286 // runtime expects it.
3287 __ movl(c_rarg1, j_rarg0);
3288
3289 __ set_last_Java_frame(noreg, noreg, NULL);
3290
3291 // Call C code. Need thread but NOT official VM entry
3292 // crud. We cannot block on this call, no GC can happen. Call should
3293 // capture callee-saved registers as well as return values.
3294 // Thread is in rdi already.
3295 //
3296 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3297
3298 __ mov(c_rarg0, r15_thread);
3299 __ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
3300 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3301
3302 // Set an oopmap for the call site
3303 OopMapSet* oop_maps = new OopMapSet();
3304 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3305
3306 // location of rbp is known implicitly by the frame sender code
3307
3308 oop_maps->add_gc_map(__ pc() - start, map);
3309
3310 __ reset_last_Java_frame(false);
3311
3312 // Load UnrollBlock* into rdi
3313 __ mov(rdi, rax);
3314
3315 #ifdef ASSERT
3316 { Label L;
3317 __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
3318 (int32_t)Deoptimization::Unpack_uncommon_trap);
3319 __ jcc(Assembler::equal, L);
3320 __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
3321 __ bind(L);
3322 }
3323 #endif
3324
3325 // Pop all the frames we must move/replace.
3326 //
3327 // Frame picture (youngest to oldest)
3328 // 1: self-frame (no frame link)
3329 // 2: deopting frame (no frame link)
3330 // 3: caller of deopting frame (could be compiled/interpreted).
3331
3332 // Pop self-frame. We have no frame, and must rely only on rax and rsp.
3333 __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3334
3335 // Pop deoptimized frame (int)
3336 __ movl(rcx, Address(rdi,
3337 Deoptimization::UnrollBlock::
3338 size_of_deoptimized_frame_offset_in_bytes()));
3339 __ addptr(rsp, rcx);
3340
3341 // rsp should be pointing at the return address to the caller (3)
3342
3343 // Pick up the initial fp we should save
3344 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3345 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3346
3347 #ifdef ASSERT
3348 // Compilers generate code that bangs the stack by as much as the
3349 // interpreter would need. So this stack banging should never
3350 // trigger a fault. Verify that it does not on non-product builds.
3351 if (UseStackBanging) {
3352 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3353 __ bang_stack_size(rbx, rcx);
3354 }
3355 #endif
3356
3357 // Load address of array of frame pcs into rcx (address*)
3358 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3359
3360 // Trash the return pc
3361 __ addptr(rsp, wordSize);
3362
3363 // Load address of array of frame sizes into rsi (intptr_t*)
3364 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock:: frame_sizes_offset_in_bytes()));
3365
3366 // Counter
3367 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock:: number_of_frames_offset_in_bytes())); // (int)
3368
3369 // Now adjust the caller's stack to make up for the extra locals but
3370 // record the original sp so that we can save it in the skeletal
3371 // interpreter frame and the stack walking of interpreter_sender
3372 // will get the unextended sp value and not the "real" sp value.
3373
3374 const Register sender_sp = r8;
3375
3376 __ mov(sender_sp, rsp);
3377 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock:: caller_adjustment_offset_in_bytes())); // (int)
3378 __ subptr(rsp, rbx);
3379
3380 // Push interpreter frames in a loop
3381 Label loop;
3382 __ bind(loop);
3383 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3384 __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
3385 __ pushptr(Address(rcx, 0)); // Save return address
3386 __ enter(); // Save old & set new rbp
3387 __ subptr(rsp, rbx); // Prolog
3388 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3389 sender_sp); // Make it walkable
3390 // This value is corrected by layout_activation_impl
3391 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3392 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3393 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3394 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3395 __ decrementl(rdx); // Decrement counter
3396 __ jcc(Assembler::notZero, loop);
3397 __ pushptr(Address(rcx, 0)); // Save final return address
3398
3399 // Re-push self-frame
3400 __ enter(); // Save old & set new rbp
3401 __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3402 // Prolog
3403
3404 // Use rbp because the frames look interpreted now
3405 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3406 // Don't need the precise return PC here, just precise enough to point into this code blob.
3407 address the_pc = __ pc();
3408 __ set_last_Java_frame(noreg, rbp, the_pc);
3409
3410 // Call C code. Need thread but NOT official VM entry
3411 // crud. We cannot block on this call, no GC can happen. Call should
3412 // restore return values to their stack-slots with the new SP.
3413 // Thread is in rdi already.
3414 //
3415 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3416
3417 __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3418 __ mov(c_rarg0, r15_thread);
3419 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3420 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3421
3422 // Set an oopmap for the call site
3423 // Use the same PC we used for the last java frame
3424 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3425
3426 // Clear fp AND pc
3427 __ reset_last_Java_frame(true);
3428
3429 // Pop self-frame.
3430 __ leave(); // Epilog
3431
3432 // Jump to interpreter
3433 __ ret(0);
3434
3435 // Make sure all code is generated
3436 masm->flush();
3437
3438 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3439 SimpleRuntimeFrame::framesize >> 1);
3440 }
3441 #endif // COMPILER2
3442
3443
3444 //------------------------------generate_handler_blob------
3445 //
3446 // Generate a special Compile2Runtime blob that saves all registers,
3447 // and sets up an oopmap.
3448 //
3449 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3450 assert(StubRoutines::forward_exception_entry() != NULL,
3451 "must be generated before");
3452
3453 ResourceMark rm;
3454 OopMapSet *oop_maps = new OopMapSet();
3455 OopMap* map;
3456
3457 // Allocate space for the code. Setup code generation tools.
3458 CodeBuffer buffer("handler_blob", 2048, 1024);
3459 MacroAssembler* masm = new MacroAssembler(&buffer);
3460
3461 address start = __ pc();
3462 address call_pc = NULL;
3463 int frame_size_in_words;
3464 bool cause_return = (poll_type == POLL_AT_RETURN);
3465 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3466
3467 if (UseRTMLocking) {
3468 // Abort RTM transaction before calling runtime
3469 // because critical section will be large and will be
3470 // aborted anyway. Also nmethod could be deoptimized.
3471 __ xabort(0);
3472 }
3473
3474 // Make room for return address (or push it again)
3475 if (!cause_return) {
3476 __ push(rbx);
3477 }
3478
3479 // Save registers, fpu state, and flags
3480 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3481
3482 // The following is basically a call_VM. However, we need the precise
3483 // address of the call in order to generate an oopmap. Hence, we do all the
3484 // work ourselves.
3485
3486 __ set_last_Java_frame(noreg, noreg, NULL);
3487
3488 // The return address must always be correct so that frame constructor never
3489 // sees an invalid pc.
3490
3491 if (!cause_return) {
3492 // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
3493 // Additionally, rbx is a callee saved register and we can look at it later to determine
3494 // if someone changed the return address for us!
3495 __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3496 __ movptr(Address(rbp, wordSize), rbx);
3497 }
3498
3499 // Do the call
3500 __ mov(c_rarg0, r15_thread);
3501 __ call(RuntimeAddress(call_ptr));
3502
3503 // Set an oopmap for the call site. This oopmap will map all
3504 // oop-registers and debug-info registers as callee-saved. This
3505 // will allow deoptimization at this safepoint to find all possible
3506 // debug-info recordings, as well as let GC find all oops.
3507
3508 oop_maps->add_gc_map( __ pc() - start, map);
3509
3510 Label noException;
3511
3512 __ reset_last_Java_frame(false);
3513
3514 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3515 __ jcc(Assembler::equal, noException);
3516
3517 // Exception pending
3518
3519 RegisterSaver::restore_live_registers(masm, save_vectors);
3520
3521 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3522
3523 // No exception case
3524 __ bind(noException);
3525
3526 Label no_adjust;
3527 #ifdef ASSERT
3528 Label bail;
3529 #endif
3530 if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
3531 Label no_prefix, not_special;
3532
3533 // If our stashed return pc was modified by the runtime we avoid touching it
3534 __ cmpptr(rbx, Address(rbp, wordSize));
3535 __ jccb(Assembler::notEqual, no_adjust);
3536
3537 // Skip over the poll instruction.
3538 // See NativeInstruction::is_safepoint_poll()
3539 // Possible encodings:
3540 // 85 00 test %eax,(%rax)
3541 // 85 01 test %eax,(%rcx)
3542 // 85 02 test %eax,(%rdx)
3543 // 85 03 test %eax,(%rbx)
3544 // 85 06 test %eax,(%rsi)
3545 // 85 07 test %eax,(%rdi)
3546 //
3547 // 41 85 00 test %eax,(%r8)
3548 // 41 85 01 test %eax,(%r9)
3549 // 41 85 02 test %eax,(%r10)
3550 // 41 85 03 test %eax,(%r11)
3551 // 41 85 06 test %eax,(%r14)
3552 // 41 85 07 test %eax,(%r15)
3553 //
3554 // 85 04 24 test %eax,(%rsp)
3555 // 41 85 04 24 test %eax,(%r12)
3556 // 85 45 00 test %eax,0x0(%rbp)
3557 // 41 85 45 00 test %eax,0x0(%r13)
3558
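// Worked examples of the length computation done below (derived from the
// encodings listed above):
//   85 00         test %eax,(%rax)    -> 2 bytes (opcode + modrm)
//   41 85 00      test %eax,(%r8)     -> 3 bytes (+1 for the REX.B prefix)
//   41 85 04 24   test %eax,(%r12)    -> 4 bytes (+1 prefix, +1 SIB byte)
//   85 45 00      test %eax,0x0(%rbp) -> 3 bytes (+1 displacement byte)
// i.e. +1 if the first byte is the REX.B prefix, +1 if the base register is
// rsp/rbp/r12/r13, and +2 for the opcode and modrm bytes themselves.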
3559 __ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
3560 __ jcc(Assembler::notEqual, no_prefix);
3561 __ addptr(rbx, 1);
3562 __ bind(no_prefix);
3563 #ifdef ASSERT
3564 __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
3565 #endif
3566 // r12/r13/rsp/rbp base encoding takes 3 bytes with the following register values:
3567 // r12/rsp 0x04
3568 // r13/rbp 0x05
3569 __ movzbq(rcx, Address(rbx, 1));
3570 __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
3571 __ subptr(rcx, 4); // looking for 0x00 .. 0x01
3572 __ cmpptr(rcx, 1);
3573 __ jcc(Assembler::above, not_special);
3574 __ addptr(rbx, 1);
3575 __ bind(not_special);
3576 #ifdef ASSERT
3577 // Verify the correct encoding of the poll we're about to skip.
3578 __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
3579 __ jcc(Assembler::notEqual, bail);
3580 // Mask out the modrm bits
3581 __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
3582 // rax encodes to 0, so if the bits are nonzero it's incorrect
3583 __ jcc(Assembler::notZero, bail);
3584 #endif
3585 // Adjust return pc forward to step over the safepoint poll instruction
3586 __ addptr(rbx, 2);
3587 __ movptr(Address(rbp, wordSize), rbx);
3588 }
3589
3590 __ bind(no_adjust);
3591 // Normal exit, restore registers and exit.
3592 RegisterSaver::restore_live_registers(masm, save_vectors);
3593 __ ret(0);
3594
3595 #ifdef ASSERT
3596 __ bind(bail);
3597 __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
3598 #endif
3599
3600 // Make sure all code is generated
3601 masm->flush();
3602
3603 // Fill-out other meta info
3604 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3605 }
3606
3607 //
3608 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3609 //
3610 // Generate a stub that calls into the VM to find out the proper destination
3611 // of a Java call. All the argument registers are live at this point
3612 // but since this is generic code we don't know what they are and the caller
3613 // must do any GC of the args.
3614 //
3615 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3616 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3617
3618 // allocate space for the code
3619 ResourceMark rm;
3620
3621 CodeBuffer buffer(name, 1000, 512);
3622 MacroAssembler* masm = new MacroAssembler(&buffer);
3623
3624 int frame_size_in_words;
3625
3626 OopMapSet *oop_maps = new OopMapSet();
3627 OopMap* map = NULL;
3628
3629 int start = __ offset();
3630
3631 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3632
3633 int frame_complete = __ offset();
3634
3635 __ set_last_Java_frame(noreg, noreg, NULL);
3636
3637 __ mov(c_rarg0, r15_thread);
3638
3639 __ call(RuntimeAddress(destination));
3640
3641
3642 // Set an oopmap for the call site.
3643 // We need this not only for callee-saved registers, but also for volatile
3644 // registers that the compiler might be keeping live across a safepoint.
3645
3646 oop_maps->add_gc_map( __ offset() - start, map);
3647
3648 // rax contains the address we are going to jump to assuming no exception got installed
3649
3650 // clear last_Java_sp
3651 __ reset_last_Java_frame(false);
3652 // check for pending exceptions
3653 Label pending;
3654 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3655 __ jcc(Assembler::notEqual, pending);
3656
3657 // get the returned Method*
3658 __ get_vm_result_2(rbx, r15_thread);
3659 __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3660
3661 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3662
3663 RegisterSaver::restore_live_registers(masm);
3664
3665 // We are back to the original state on entry and ready to go.
3666
3667 __ jmp(rax);
3668
3669 // Pending exception after the safepoint
3670
3671 __ bind(pending);
3672
3673 RegisterSaver::restore_live_registers(masm);
3674
3675 // exception pending => remove activation and forward to exception handler
3676
3677 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3678
3679 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3680 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3681
3682 // -------------
3683 // make sure all code is generated
3684 masm->flush();
3685
3686 // return the blob
3687 // frame_size_words or bytes??
3688 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3689 }
3690
3691
3692 //------------------------------Montgomery multiplication------------------------
3693 //
3694
3695 #ifndef _WINDOWS
3696
3697 // Subtract 0:b from carry:a. Return carry.
3698 static julong
3699 sub(julong a[], julong b[], julong carry, long len) {
3700 long long i = 0, cnt = len;
3701 julong tmp;
3702 asm volatile("clc; "
3703 "0: ; "
3704 "mov (%[b], %[i], 8), %[tmp]; "
3705 "sbb %[tmp], (%[a], %[i], 8); "
3706 "inc %[i]; dec %[cnt]; "
3707 "jne 0b; "
3708 "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3709 : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3710 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3711 : "memory");
3712 return tmp;
3713 }
3714
3715 // Multiply (unsigned) Long A by Long B, accumulating the double-
3716 // length result into the accumulator formed of T0, T1, and T2.
3717 #define MACC(A, B, T0, T1, T2) \
3718 do { \
3719 unsigned long hi, lo; \
3720 __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
3721 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
3722 : "r"(A), "a"(B) : "cc"); \
3723 } while(0)
3724
3725 // As above, but add twice the double-length result into the
3726 // accumulator.
3727 #define MACC2(A, B, T0, T1, T2) \
3728 do { \
3729 unsigned long hi, lo; \
3730 __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4; " \
3731 "add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
3732 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
3733 : "r"(A), "a"(B) : "cc"); \
3734 } while(0)
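// Reference only: a portable sketch of the invariant MACC maintains,
// (t2:t1:t0) += (julong)A * (julong)B, assuming a compiler with the
// unsigned __int128 extension (gcc/clang). MACC2 adds the product twice.
// Not used by the code below; kept disabled purely as documentation.
#if 0
static inline void macc_reference(julong a, julong b,
                                  julong& t0, julong& t1, julong& t2) {
  unsigned __int128 p = (unsigned __int128)a * b + t0;      // product + low word
  t0 = (julong)p;
  unsigned __int128 s = (unsigned __int128)(p >> 64) + t1;  // propagate carry
  t1 = (julong)s;
  t2 += (julong)(s >> 64);                                   // top word
}
#endif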
3735
3736 #else //_WINDOWS
3737
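// Subtract 0:b from carry:a and return the carry word, as in the gcc/clang
// version above, but built from the MSVC _addcarry_u64 intrinsic: a - b is
// computed as a + ~b + 1, with the borrow carried between limbs.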
3738 static julong
3739 sub(julong a[], julong b[], julong carry, long len) {
3740 long i;
3741 julong tmp;
3742 unsigned char c = 1;
3743 for (i = 0; i < len; i++) {
3744 c = _addcarry_u64(c, a[i], ~b[i], &tmp);
3745 a[i] = tmp;
3746 }
3747 c = _addcarry_u64(c, carry, ~0, &tmp);
3748 return tmp;
3749 }
3750
3751 // Multiply (unsigned) Long A by Long B, accumulating the double-
3752 // length result into the accumulator formed of T0, T1, and T2.
3753 #define MACC(A, B, T0, T1, T2) \
3754 do { \
3755 julong hi, lo; \
3756 lo = _umul128(A, B, &hi); \
3757 unsigned char c = _addcarry_u64(0, lo, T0, &T0); \
3758 c = _addcarry_u64(c, hi, T1, &T1); \
3759 _addcarry_u64(c, T2, 0, &T2); \
3760 } while(0)
3761
3762 // As above, but add twice the double-length result into the
3763 // accumulator.
3764 #define MACC2(A, B, T0, T1, T2) \
3765 do { \
3766 julong hi, lo; \
3767 lo = _umul128(A, B, &hi); \
3768 unsigned char c = _addcarry_u64(0, lo, T0, &T0); \
3769 c = _addcarry_u64(c, hi, T1, &T1); \
3770 _addcarry_u64(c, T2, 0, &T2); \
3771 c = _addcarry_u64(0, lo, T0, &T0); \
3772 c = _addcarry_u64(c, hi, T1, &T1); \
3773 _addcarry_u64(c, T2, 0, &T2); \
3774 } while(0)
3775
3776 #endif //_WINDOWS
3777
3778 // Fast Montgomery multiplication. The derivation of the algorithm is
3779 // in A Cryptographic Library for the Motorola DSP56000,
3780 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
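//
// The key step in the loops below is m[i] = t0 * inv. Because the caller
// guarantees inv * n[0] == -1 (mod 2^64) (see the assert), adding
// m[i] * n[0] cancels the low 64 bits of the accumulator:
//   t0 + m[i]*n[0] == t0 + t0*inv*n[0] == t0 - t0 == 0 (mod 2^64),
// which is exactly what the "broken Montgomery multiply" assert checks.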
3781
3782 static void NOINLINE
3783 montgomery_multiply(julong a[], julong b[], julong n[],
3784 julong m[], julong inv, int len) {
3785 julong t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3786 int i;
3787
3788 assert(inv * n[0] == ULLONG_MAX, "broken inverse in Montgomery multiply");
3789
3790 for (i = 0; i < len; i++) {
3791 int j;
3792 for (j = 0; j < i; j++) {
3793 MACC(a[j], b[i-j], t0, t1, t2);
3794 MACC(m[j], n[i-j], t0, t1, t2);
3795 }
3796 MACC(a[i], b[0], t0, t1, t2);
3797 m[i] = t0 * inv;
3798 MACC(m[i], n[0], t0, t1, t2);
3799
3800 assert(t0 == 0, "broken Montgomery multiply");
3801
3802 t0 = t1; t1 = t2; t2 = 0;
3803 }
3804
3805 for (i = len; i < 2*len; i++) {
3806 int j;
3807 for (j = i-len+1; j < len; j++) {
3808 MACC(a[j], b[i-j], t0, t1, t2);
3809 MACC(m[j], n[i-j], t0, t1, t2);
3810 }
3811 m[i-len] = t0;
3812 t0 = t1; t1 = t2; t2 = 0;
3813 }
3814
3815 while (t0)
3816 t0 = sub(m, n, t0, len);
3817 }
3818
3819 // Fast Montgomery squaring. This uses asymptotically 25% fewer
3820 // multiplies so it should be up to 25% faster than Montgomery
3821 // multiplication. However, its loop control is more complex and it
3822 // may actually run slower on some machines.
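// Concretely: in column i the pairs (j, i-j) with j < i-j contribute the same
// product a[j]*a[i-j] twice, so the loop folds them into a single MACC2, and
// only the diagonal term a[i/2]^2 (present when i is even) is added once.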
3823
3824 static void NOINLINE
3825 montgomery_square(julong a[], julong n[],
3826 julong m[], julong inv, int len) {
3827 julong t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3828 int i;
3829
3830 assert(inv * n[0] == ULLONG_MAX, "broken inverse in Montgomery square");
3831
3832 for (i = 0; i < len; i++) {
3833 int j;
3834 int end = (i+1)/2;
3835 for (j = 0; j < end; j++) {
3836 MACC2(a[j], a[i-j], t0, t1, t2);
3837 MACC(m[j], n[i-j], t0, t1, t2);
3838 }
3839 if ((i & 1) == 0) {
3840 MACC(a[j], a[j], t0, t1, t2);
3841 }
3842 for (; j < i; j++) {
3843 MACC(m[j], n[i-j], t0, t1, t2);
3844 }
3845 m[i] = t0 * inv;
3846 MACC(m[i], n[0], t0, t1, t2);
3847
3848 assert(t0 == 0, "broken Montgomery square");
3849
3850 t0 = t1; t1 = t2; t2 = 0;
3851 }
3852
3853 for (i = len; i < 2*len; i++) {
3854 int start = i-len+1;
3855 int end = start + (len - start)/2;
3856 int j;
3857 for (j = start; j < end; j++) {
3858 MACC2(a[j], a[i-j], t0, t1, t2);
3859 MACC(m[j], n[i-j], t0, t1, t2);
3860 }
3861 if ((i & 1) == 0) {
3862 MACC(a[j], a[j], t0, t1, t2);
3863 }
3864 for (; j < len; j++) {
3865 MACC(m[j], n[i-j], t0, t1, t2);
3866 }
3867 m[i-len] = t0;
3868 t0 = t1; t1 = t2; t2 = 0;
3869 }
3870
3871 while (t0)
3872 t0 = sub(m, n, t0, len);
3873 }
3874
3875 // Swap words in a longword.
3876 static julong swap(julong x) {
3877 return (x << 32) | (x >> 32);
3878 }
3879
3880 // Copy len longwords from s to d, word-swapping as we go. The
3881 // destination array is reversed.
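// For example, with len == 2 and s == { 0x0000000100000002, 0x0000000300000004 },
// d becomes { 0x0000000400000003, 0x0000000200000001 }: the array order is
// reversed and the two 32-bit halves of each 64-bit word are exchanged.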
3882 static void reverse_words(julong *s, julong *d, int len) {
3883 d += len;
3884 while(len-- > 0) {
3885 d--;
3886 *d = swap(*s);
3887 s++;
3888 }
3889 }
3890
3891 // The threshold at which squaring is advantageous was determined
3892 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
3893 #define MONTGOMERY_SQUARING_THRESHOLD 64
3894
3895 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
3896 jint len, jlong inv,
3897 jint *m_ints) {
3898 assert(len % 2 == 0, "array length in montgomery_multiply must be even");
3899 int longwords = len/2;
3900
3901 // Make very sure we don't use so much space that the stack might
3902 // overflow. 512 jints corresponds to a 16384-bit integer and
3903 // will use here a total of 8k bytes of stack space.
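// (512 jints -> 256 julongs per array; 4 arrays * 256 * 8 bytes = 8192 bytes,
// which is exactly the limit guaranteed below.)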
3904 int total_allocation = longwords * sizeof (julong) * 4;
3905 guarantee(total_allocation <= 8192, "must be");
3906 julong *scratch = (julong *)alloca(total_allocation);
3907
3908 // Local scratch arrays
3909 julong
3910 *a = scratch + 0 * longwords,
3911 *b = scratch + 1 * longwords,
3912 *n = scratch + 2 * longwords,
3913 *m = scratch + 3 * longwords;
3914
3915 reverse_words((julong *)a_ints, a, longwords);
3916 reverse_words((julong *)b_ints, b, longwords);
3917 reverse_words((julong *)n_ints, n, longwords);
3918
3919 ::montgomery_multiply(a, b, n, m, (julong)inv, longwords);
3920
3921 reverse_words(m, (julong *)m_ints, longwords);
3922 }
3923
3924 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
3925 jint len, jlong inv,
3926 jint *m_ints) {
3927 assert(len % 2 == 0, "array length in montgomery_square must be even");
3928 int longwords = len/2;
3929
3930 // Make very sure we don't use so much space that the stack might
3931 // overflow. 512 jints corresponds to a 16384-bit integer and
3932 // will use here a total of 6k bytes of stack space.
3933 int total_allocation = longwords * sizeof (julong) * 3;
3934 guarantee(total_allocation <= 8192, "must be");
3935 julong *scratch = (julong *)alloca(total_allocation);
3936
3937 // Local scratch arrays
3938 julong
3939 *a = scratch + 0 * longwords,
3940 *n = scratch + 1 * longwords,
3941 *m = scratch + 2 * longwords;
3942
3943 reverse_words((julong *)a_ints, a, longwords);
3944 reverse_words((julong *)n_ints, n, longwords);
3945
3946 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3947 ::montgomery_square(a, n, m, (julong)inv, longwords);
3948 } else {
3949 ::montgomery_multiply(a, a, n, m, (julong)inv, longwords);
3950 }
3951
3952 reverse_words(m, (julong *)m_ints, longwords);
3953 }
3954
3955 #ifdef COMPILER2
3956 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
3957 //
3958 //------------------------------generate_exception_blob---------------------------
3959 // creates exception blob at the end
3960 // Using the exception blob, this code is jumped to from a compiled method.
3961 // (see emit_exception_handler in x86_64.ad file)
3962 //
3963 // Given an exception pc at a call we call into the runtime for the
3964 // handler in this method. This handler might merely restore state
3965 // (i.e. callee save registers), unwind the frame, and jump to the
3966 // exception handler for the nmethod if there is no Java level handler
3967 // for the nmethod.
3968 //
3969 // This code is entered with a jmp.
3970 //
3971 // Arguments:
3972 // rax: exception oop
3973 // rdx: exception pc
3974 //
3975 // Results:
3976 // rax: exception oop
3977 // rdx: exception pc in caller or ???
3978 // destination: exception handler of caller
3979 //
3980 // Note: the exception pc MUST be at a call (precise debug information)
3981 // Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
3982 //
3983
3984 void OptoRuntime::generate_exception_blob() {
3985 assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
3986 assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
3987 assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
3988
3989 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3990
3991 // Allocate space for the code
3992 ResourceMark rm;
3993 // Setup code generation tools
3994 CodeBuffer buffer("exception_blob", 2048, 1024);
3995 MacroAssembler* masm = new MacroAssembler(&buffer);
3996
3997
3998 address start = __ pc();
3999
4000 // Exception pc is 'return address' for stack walker
4001 __ push(rdx);
4002 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
4003
4004 // Save callee-saved registers. See x86_64.ad.
4005
4006 // rbp is an implicitly saved callee saved register (i.e., the calling
4007 // convention will save/restore it in the prolog/epilog). Other than that
4008 // there are no callee save registers now that adapter frames are gone.
4009
4010 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
4011
4012 // Store exception in Thread object. We cannot pass any arguments to the
4013 // handle_exception call, since we do not want to make any assumption
4014 // about the size of the frame where the exception happened in.
4015 // c_rarg0 is either rdi (Linux) or rcx (Windows).
4016 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
4017 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
4018
4019 // This call does all the hard work. It checks if an exception handler
4020 // exists in the method.
4021 // If so, it returns the handler address.
4022 // If not, it prepares for stack-unwinding, restoring the callee-save
4023 // registers of the frame being removed.
4024 //
4025 // address OptoRuntime::handle_exception_C(JavaThread* thread)
4026
4027 // At a method handle call, the stack may not be properly aligned
4028 // when returning with an exception.
4029 address the_pc = __ pc();
4030 __ set_last_Java_frame(noreg, noreg, the_pc);
4031 __ mov(c_rarg0, r15_thread);
4032 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
4033 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
4034
4035 // Set an oopmap for the call site. This oopmap will only be used if we
4036 // are unwinding the stack. Hence, all locations will be dead.
4037 // Callee-saved registers will be the same as the frame above (i.e.,
4038 // handle_exception_stub), since they were restored when we got the
4039 // exception.
4040
4041 OopMapSet* oop_maps = new OopMapSet();
4042
4043 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
4044
4045 __ reset_last_Java_frame(false);
4046
4047 // Restore callee-saved registers
4048
4049 // rbp is an implicitly saved callee-saved register (i.e., the calling
4050 // convention will save/restore it in the prolog/epilog). Other than that
4051 // there are no callee save registers now that adapter frames are gone.
4052
4053 __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
4054
4055 __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
4056 __ pop(rdx); // No need for exception pc anymore
4057
4058 // rax: exception handler
4059
4060 // We have a handler in rax (could be deopt blob).
4061 __ mov(r8, rax);
4062
4063 // Get the exception oop
4064 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4065 // Get the exception pc in case we are deoptimized
4066 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4067 #ifdef ASSERT
4068 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4069 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4070 #endif
4071 // Clear the exception oop so GC no longer processes it as a root.
4072 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4073
4074 // rax: exception oop
4075 // r8: exception handler
4076 // rdx: exception pc
4077 // Jump to handler
4078
4079 __ jmp(r8);
4080
4081 // Make sure all code is generated
4082 masm->flush();
4083
4084 // Set exception blob
4085 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4086 }
4087 #endif // COMPILER2
4088