/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#if !defined(_WINDOWS) && !defined(_BSDONLY_SOURCE)
#include "alloca.h"
#endif
#ifdef _BSDONLY_SOURCE
#include <stdlib.h>
#endif
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};

class RegisterSaver {
  // Capture info about frame layout. Layout offsets are in jint
  // units because compiler frame slots are jints.
#define XSAVE_AREA_BEGIN 160
#define XSAVE_AREA_YMM_BEGIN 576
#define XSAVE_AREA_ZMM_BEGIN 1152
#define XSAVE_AREA_UPPERBANK 1664
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
#define DEF_YMM_OFFS(regnum) ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
#define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum)*32/BytesPerInt, zmm ## regnum ## H_off
#define DEF_ZMM_UPPER_OFFS(regnum) zmm ## regnum ## _off = zmm_upper_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
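// For illustration (an added note, not from the original sources): with
// BytesPerInt == 4, DEF_XMM_OFFS(1) expands to
//   xmm1_off = xmm_off + 1*16/BytesPerInt,  // 4 jint slots == 16 bytes past xmm_off
//   xmm1H_off
// so consecutive XMM save slots are 16 bytes apart in the fxsave image, and
// the DEF_ZMM_UPPER_OFFS slots are 64 bytes apart.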
  enum layout {
    fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    xmm_off = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt,     // offset in fxsave save area
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    // 2..15 are implied in range usage
    ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
    DEF_YMM_OFFS(0),
    DEF_YMM_OFFS(1),
    // 2..15 are implied in range usage
    zmm_off = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
    DEF_ZMM_OFFS(0),
    DEF_ZMM_OFFS(1),
    zmm_upper_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
    DEF_ZMM_UPPER_OFFS(16),
    DEF_ZMM_UPPER_OFFS(17),
    // 18..31 are implied in range usage
    fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
    r11_off, r11H_off,
    r10_off, r10H_off,
    r9_off,  r9H_off,
    r8_off,  r8H_off,
    rdi_off, rdiH_off,
    rsi_off, rsiH_off,
    ignore_off, ignoreH_off,  // extra copy of rbp
    rsp_off, rspH_off,
    rbx_off, rbxH_off,
    rdx_off, rdxH_off,
    rcx_off, rcxH_off,
    rax_off, raxH_off,
    // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
    align_off, alignH_off,
    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };

 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
  int off = 0;
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
#if COMPILER2_OR_JVMCI
  if (save_vectors && UseAVX == 0) {
    save_vectors = false; // vectors longer than 16 bytes are supported only with AVX
  }
  assert(!save_vectors || MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
#else
  save_vectors = false; // vectors are generated only by C2 and JVMCI
#endif

  // Always make the frame size 16-byte aligned, both vector and non vector stacks are always allocated
  int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume the caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address, like a normal enter.

  __ enter();          // rsp becomes 16-byte aligned here
  __ push_CPU_state(); // Push a multiple of 16 bytes

  // push_CPU_state handles this on EVEX enabled targets
  if (save_vectors) {
    // Save upper half of YMM registers(0..15)
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
    }
    if (VM_Version::supports_evex()) {
      // Save upper half of ZMM registers(0..15)
      base_addr = XSAVE_AREA_ZMM_BEGIN;
      for (int n = 0; n < 16; n++) {
        __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
      }
      // Save full ZMM registers(16..num_xmm_regs)
      base_addr = XSAVE_AREA_UPPERBANK;
      off = 0;
      int vector_len = Assembler::AVX_512bit;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
      }
    }
  } else {
    if (VM_Version::supports_evex()) {
      // Save upper bank of ZMM registers(16..31) for double/float usage
      int base_addr = XSAVE_AREA_UPPERBANK;
      off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
      }
    }
  }
  __ vzeroupper();
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x))

  map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code, needs no oopmap
  // and the location where rbp was saved is ignored
  map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r8_off  ), r8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r9_off  ), r9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
  // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15,
  // on EVEX enabled targets, we get it included in the xsave area
  off = xmm0_off;
  int delta = xmm1_off - off;
  for (int n = 0; n < 16; n++) {
    XMMRegister xmm_name = as_XMMRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
    off += delta;
  }
  if (UseAVX > 2) {
    // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
    off = zmm16_off;
    delta = zmm17_off - off;
    for (int n = 16; n < num_xmm_regs; n++) {
      XMMRegister zmm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
      off += delta;
    }
  }

#if COMPILER2_OR_JVMCI
  if (save_vectors) {
    // Save upper half of YMM registers(0..15)
    off = ymm0_off;
    delta = ymm1_off - ymm0_off;
    for (int n = 0; n < 16; n++) {
      XMMRegister ymm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
      off += delta;
    }
    if (VM_Version::supports_evex()) {
      // Save upper half of ZMM registers(0..15)
      off = zmm0_off;
      delta = zmm1_off - zmm0_off;
      for (int n = 0; n < 16; n++) {
        XMMRegister zmm_name = as_XMMRegister(n);
        map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next(8));
        off += delta;
      }
    }
  }
#endif // COMPILER2_OR_JVMCI

  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
    // rbp location is known implicitly by the frame sender code, needs no oopmap
    map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
    // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15,
    // on EVEX enabled targets, we get it included in the xsave area
    off = xmm0H_off;
    delta = xmm1H_off - off;
    for (int n = 0; n < 16; n++) {
      XMMRegister xmm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
      off += delta;
    }
    if (UseAVX > 2) {
      // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
      off = zmm16H_off;
      delta = zmm17H_off - off;
      for (int n = 16; n < num_xmm_regs; n++) {
        XMMRegister zmm_name = as_XMMRegister(n);
        map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
        off += delta;
      }
    }
  }

  return map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
  if (frame::arg_reg_save_area_bytes != 0) {
    // Pop arg register save area
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

#if COMPILER2_OR_JVMCI
  if (restore_vectors) {
    assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
    assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2");
#endif

  __ vzeroupper();

  // On EVEX enabled targets everything is handled in pop fpu state
  if (restore_vectors) {
    // Restore upper half of YMM registers (0..15)
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
    }
    if (VM_Version::supports_evex()) {
      // Restore upper half of ZMM registers (0..15)
      base_addr = XSAVE_AREA_ZMM_BEGIN;
      for (int n = 0; n < 16; n++) {
        __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
      }
      // Restore full ZMM registers(16..num_xmm_regs)
      base_addr = XSAVE_AREA_UPPERBANK;
      int vector_len = Assembler::AVX_512bit;
      int off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
      }
    }
  } else {
    if (VM_Version::supports_evex()) {
      // Restore upper bank of ZMM registers(16..31) for double/float usage
      int base_addr = XSAVE_AREA_UPPERBANK;
      int off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
      }
    }
  }

  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result register. Only used by deoptimization. By
  // now any callee save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  // Restore fp result register
  __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
  // Restore integer result register
  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));

  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_offset_in_bytes());
}

// Is a vector's size (in bytes) bigger than a size saved by default?
// 16-byte XMM registers are saved by default using fxsave/fxrstor instructions.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}

size_t SharedRuntime::trampoline_size() {
  return 16;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ jump(RuntimeAddress(destination));
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
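
// For illustration (an added note, not from the original sources): with
// VMRegImpl::stack_slot_size == 4, reg2offset_in maps incoming stack slot 0
// to (0 + 4) * 4 == 16 bytes above rbp, i.e. it skips the saved rbp (8 bytes)
// and the return address (8 bytes) that sit between rbp and the caller's
// outgoing arguments.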

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher. Register values (0 up to
// RegisterImpl::number_of_registers) are the 64-bit integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64-bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
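
// A worked example (an added sketch, not from the original sources): for a
// signature (int, long, Object, double) the loop below sees
//   sig_bt = { T_INT, T_LONG, T_VOID, T_OBJECT, T_DOUBLE, T_VOID }
// and produces
//   T_INT    -> j_rarg0 (first Java integer arg register)
//   T_LONG   -> j_rarg1 (its T_VOID half gets set_bad())
//   T_OBJECT -> j_rarg2
//   T_DOUBLE -> j_farg0 (its T_VOID half gets set_bad())
// returning 0 because no stack slots were needed.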

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return align_up(stk_args, 2);
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);

  // Save the current stack pointer
  __ mov(r13, rsp);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax isn't live so capture return address while we easily can
  __ movptr(rax, Address(rsp, 0));

  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();
  __ vzeroupper();
  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

  // Allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  __ mov(c_rarg0, rbx);
  __ mov(c_rarg1, rax);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

  __ vzeroupper();
  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
}


static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need. Plus 1 because
  // we also account for the return address location since
  // we store it first rather than hold it in rax across all the shuffling

  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);
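
  // For illustration (an added note, not from the original sources): with
  // Interpreter::stackElementSize == 8, four args need 4*8 + 8 == 40 bytes,
  // which align_up rounds to 48 so rsp stays 16-byte aligned.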

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ mov(r13, rsp);

  __ subptr(rsp, extraspace);

  // Store the return address in the expected location
  __ movptr(Address(rsp, 0), rax);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a long/double
    // in a single slot on a 64-bit VM and it would be silly to break them up,
    // the interpreter leaves one slot empty and only stores to a single slot.
    // In this case the slot that is occupied is the T_VOID slot. See, I said it
    // was confusing.
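    //
    // To illustrate (an added note, not from the original sources): for the
    // T_LONG at i == 0 above, st_off == 32 and next_off == 24, and the movq
    // below stores the whole long at next_off (the T_VOID slot); in debug
    // builds the st_off slot is filled with known junk instead.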

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory, use rax
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        // sign extend??
        __ movl(rax, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rax);

      } else {

        __ movq(rax, Address(rsp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ movq(Address(rsp, next_off), rax);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        } else {
          __ movq(Address(rsp, st_off), rax);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or smaller) so move only 32 bits to slot
        // why not sign extend??
        __ movl(Address(rsp, st_off), r);
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaab));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movq(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        // only a float, use just part of the slot
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaac));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  __ jmp(rcx);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes on an i2c entry, else we lose the
  // alignment we expect in all compiled code, and the register save code
  // can segv when fxsave instructions find an improperly aligned stack
  // pointer.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.
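  //
  // (An added illustration, not from the original comment: an interpreted
  //  caller -> i2c -> compiled callee chain is safe because the interpreted
  //  caller repairs its own SP afterwards, and a compiled caller -> c2i ->
  //  interpreted callee chain is safe because the interpreted callee restores
  //  its caller's SP from sender_sp; only c2i -> i2c, with compiled code on
  //  both ends, leaves nobody to repair the SP.)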

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(r11, rsp);

  // Cut-out for having no stack args. Since up to 6 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }
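
  // For illustration (an added note, not from the original sources): with
  // comp_args_on_stack == 5, the 5 four-byte slots round up to 24 bytes ==
  // 3 words, which align_up(3, 2) pads to 4 words, so 32 bytes are reserved.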


  // Ensure compiled code always sees stack at proper alignment
  __ andptr(rsp, -16);

  // push the return address and misalign the stack so that the youngest
  // frame sees the same placement of the call instruction's return address
  // that every other compiled frame does
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, r11);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI || UseAOT) {
    // check if this call should be routed towards a specific entry point
    __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
    Label no_alternative_target;
    __ jcc(Assembler::equal, no_alternative_target);
    __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;
    //
    //
    //
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use r13 as a temp here because compiled code doesn't need r13 as an input
      // and if we end up going through a c2i because of a miss, a reasonable value of r13
      // will be generated.
      if (!r_2->is_valid()) {
        // sign extend???
        __ movl(r13, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), r13);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
        // So we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movq(r13, Address(saved_sp, offset));
        // st_off is LSW (i.e. reg.first())
        __ movq(Address(rsp, st_off), r13);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case
        // So we must adjust where to pick up the data to match the interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movq(r, Address(saved_sp, offset));
      } else {
        // sign extend and use a full word?
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

  // put Method* where a c2i would expect it, should we end up there;
  // only needed because c2's resolve stubs return the Method* as a result
  // in rax
  __ mov(rax, rbx);
  __ jmp(r11);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
  // to the interpreter. The args start out packed in the compiled layout. They
  // need to be unpacked into the interpreter layout. This will almost always
  // require some stack space. We grow the current (compiled) stack, then repack
  // the args. We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not RBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  Label ok;

  Register holder = rax;
  Register receiver = j_rarg0;
  Register temp = rbx;

  {
    __ load_klass(temp, receiver, rscratch1);
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ jcc(Assembler::equal, ok);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = NULL;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;
    Register method = rbx;

    { // Bypass the barrier for non-static methods
      Register flags = rscratch1;
      __ movl(flags, Address(method, Method::access_flags_offset()));
      __ testl(flags, JVM_ACC_STATIC);
      __ jcc(Assembler::zero, L_skip_barrier); // non-static
    }

    Register klass = rscratch1;
    __ load_method_holder(klass, method);
    __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);

    __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on x86");
  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  // NOTE: These arrays will have to change when c1 is ported
#ifdef _WIN64
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3
  };
#else
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };
#endif // _WIN64


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
#ifdef _WIN64
  // The Windows ABI requires that we always allocate enough stack space
  // for 4 64-bit registers to be stored down.
  if (stk_args < 8) {
    stk_args = 8;
  }
#endif // _WIN64
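
  // For illustration (an added note, not from the original sources): on Win64
  // even a method taking a single int ends up with stk_args == 8 here, i.e.
  // 8 four-byte slots == 32 bytes of "home space" for the rcx/rdx/r8/r9
  // argument registers.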

  return stk_args;
}

// On 64-bit we will store integer-like items to the stack as 64-bit items
// (x86_32/64 ABI) even though Java would only store 32 bits for a parameter.
// On 32-bit it will simply be 32 bits, so this routine will do 32->32 on
// 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is NULL; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rsp, offset));
    // conditionally move a NULL from the handle area where it was just stored
    __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ movl(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        __ mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(Address(rbp, -wordSize), xmm0);
    break;
  case T_DOUBLE:
    __ movdbl(Address(rbp, -wordSize), xmm0);
    break;
  case T_VOID:  break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(xmm0, Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ movdbl(xmm0, Address(rbp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      __ push(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ subptr(rsp, 2*wordSize);
      __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
    }
  }
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      __ pop(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
      __ addptr(rsp, 2*wordSize);
    }
  }
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
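// For illustration (an added note, not from the original sources): for a Java
// int[] argument, a non-null array is unpacked below as
//   body_arg   = oop + arrayOopDesc::base_offset_in_bytes(T_INT)  (first element)
//   length_arg = the array length, loaded relative to the body
// while a null array passes 0 for both body and length.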
unpack_array_argument(MacroAssembler * masm,VMRegPair reg,BasicType in_elem_type,VMRegPair body_arg,VMRegPair length_arg)1399 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1400 Register tmp_reg = rax;
1401 assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1402 "possible collision");
1403 assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1404 "possible collision");
1405
1406 __ block_comment("unpack_array_argument {");
1407
1408 // Pass the length, ptr pair
1409 Label is_null, done;
1410 VMRegPair tmp;
1411 tmp.set_ptr(tmp_reg->as_VMReg());
1412 if (reg.first()->is_stack()) {
1413 // Load the arg up from the stack
1414 move_ptr(masm, reg, tmp);
1415 reg = tmp;
1416 }
1417 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1418 __ jccb(Assembler::equal, is_null);
1419 __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1420 move_ptr(masm, tmp, body_arg);
1421 // load the length relative to the body.
1422 __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1423 arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1424 move32_64(masm, tmp, length_arg);
1425 __ jmpb(done);
1426 __ bind(is_null);
1427 // Pass zeros
1428 __ xorptr(tmp_reg, tmp_reg);
1429 move_ptr(masm, tmp, body_arg);
1430 move32_64(masm, tmp, length_arg);
1431 __ bind(done);
1432
1433 __ block_comment("} unpack_array_argument");
1434 }
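// In effect, a Java array argument for a critical native is expanded into a
// (jint length, void* body) pair: a non-null array yields its length and a pointer to its
// first element, while a null array yields 0/NULL for both halves.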
1435
1436
1437 // Different signatures may require very different orders for the move
1438 // to avoid clobbering other arguments. There's no simple way to
1439 // order them safely. Compute a safe order for issuing stores and
1440 // break any cycles in those stores. This code is fairly general but
1441 // it's not necessary on the other platforms so we keep it in the
1442 // platform dependent code instead of moving it into a shared file.
1443 // (See bugs 7013347 & 7145024.)
1444 // Note that this code is specific to LP64.
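// Illustrative example (not taken from the source): if arg0 must move rdi -> rsi while
// arg1 must move rsi -> rdi, the two stores form a cycle. ComputeMoveOrder breaks it by
// first copying one of the sources into the temp register (rbx in the wrapper below) and
// emitting the final store from the temp after the other move has been issued.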
1445 class ComputeMoveOrder: public StackObj {
1446 class MoveOperation: public ResourceObj {
1447 friend class ComputeMoveOrder;
1448 private:
1449 VMRegPair _src;
1450 VMRegPair _dst;
1451 int _src_index;
1452 int _dst_index;
1453 bool _processed;
1454 MoveOperation* _next;
1455 MoveOperation* _prev;
1456
1457 static int get_id(VMRegPair r) {
1458 return r.first()->value();
1459 }
1460
1461 public:
1462 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1463 _src(src)
1464 , _dst(dst)
1465 , _src_index(src_index)
1466 , _dst_index(dst_index)
1467 , _processed(false)
1468 , _next(NULL)
1469 , _prev(NULL) {
1470 }
1471
1472 VMRegPair src() const { return _src; }
1473 int src_id() const { return get_id(src()); }
1474 int src_index() const { return _src_index; }
1475 VMRegPair dst() const { return _dst; }
1476 void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; }
1477 int dst_index() const { return _dst_index; }
1478 int dst_id() const { return get_id(dst()); }
1479 MoveOperation* next() const { return _next; }
1480 MoveOperation* prev() const { return _prev; }
1481 void set_processed() { _processed = true; }
1482 bool is_processed() const { return _processed; }
1483
1484 // insert
1485 void break_cycle(VMRegPair temp_register) {
1486 // create a new store following the last store
1487 // to move from the temp_register to the original
1488 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1489
1490 // break the cycle of links and insert new_store at the end
1491 // break the reverse link.
1492 MoveOperation* p = prev();
1493 assert(p->next() == this, "must be");
1494 _prev = NULL;
1495 p->_next = new_store;
1496 new_store->_prev = p;
1497
1498 // change the original store to save its value in the temp.
1499 set_dst(-1, temp_register);
1500 }
1501
1502 void link(GrowableArray<MoveOperation*>& killer) {
1503 // link this store in front the store that it depends on
1504 MoveOperation* n = killer.at_grow(src_id(), NULL);
1505 if (n != NULL) {
1506 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1507 _next = n;
1508 n->_prev = this;
1509 }
1510 }
1511 };
1512
1513 private:
1514 GrowableArray<MoveOperation*> edges;
1515
1516 public:
1517 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1518 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1519 // Move operations where the dest is the stack can all be
1520 // scheduled first since they can't interfere with the other moves.
1521 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1522 if (in_sig_bt[i] == T_ARRAY) {
1523 c_arg--;
1524 if (out_regs[c_arg].first()->is_stack() &&
1525 out_regs[c_arg + 1].first()->is_stack()) {
1526 arg_order.push(i);
1527 arg_order.push(c_arg);
1528 } else {
1529 if (out_regs[c_arg].first()->is_stack() ||
1530 in_regs[i].first() == out_regs[c_arg].first()) {
1531 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1532 } else {
1533 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1534 }
1535 }
1536 } else if (in_sig_bt[i] == T_VOID) {
1537 arg_order.push(i);
1538 arg_order.push(c_arg);
1539 } else {
1540 if (out_regs[c_arg].first()->is_stack() ||
1541 in_regs[i].first() == out_regs[c_arg].first()) {
1542 arg_order.push(i);
1543 arg_order.push(c_arg);
1544 } else {
1545 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1546 }
1547 }
1548 }
1549 // Break any cycles in the register moves and emit them in the
1550 // proper order.
1551 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1552 for (int i = 0; i < stores->length(); i++) {
1553 arg_order.push(stores->at(i)->src_index());
1554 arg_order.push(stores->at(i)->dst_index());
1555 }
1556 }
1557
1558 // Collect all the move operations
1559 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1560 if (src.first() == dst.first()) return;
1561 edges.append(new MoveOperation(src_index, src, dst_index, dst));
1562 }
1563
1564 // Walk the edges breaking cycles between moves. The result list
1565 // can be walked in order to produce the proper set of loads
1566 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1567 // Record which moves kill which values
1568 GrowableArray<MoveOperation*> killer;
1569 for (int i = 0; i < edges.length(); i++) {
1570 MoveOperation* s = edges.at(i);
1571 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1572 killer.at_put_grow(s->dst_id(), s, NULL);
1573 }
1574 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1575 "make sure temp isn't in the registers that are killed");
1576
1577 // create links between loads and stores
1578 for (int i = 0; i < edges.length(); i++) {
1579 edges.at(i)->link(killer);
1580 }
1581
1582 // at this point, all the move operations are chained together
1583 // in a doubly linked list. Processing it backwards finds
1584 // the beginning of the chain, forwards finds the end. If there's
1585 // a cycle it can be broken at any point, so pick an edge and walk
1586 // backward until the list ends or we end where we started.
1587 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1588 for (int e = 0; e < edges.length(); e++) {
1589 MoveOperation* s = edges.at(e);
1590 if (!s->is_processed()) {
1591 MoveOperation* start = s;
1592 // search for the beginning of the chain or cycle
1593 while (start->prev() != NULL && start->prev() != s) {
1594 start = start->prev();
1595 }
1596 if (start->prev() == s) {
1597 start->break_cycle(temp_register);
1598 }
1599 // walk the chain forward inserting to store list
1600 while (start != NULL) {
1601 stores->append(start);
1602 start->set_processed();
1603 start = start->next();
1604 }
1605 }
1606 }
1607 return stores;
1608 }
1609 };
1610
1611 static void verify_oop_args(MacroAssembler* masm,
1612 const methodHandle& method,
1613 const BasicType* sig_bt,
1614 const VMRegPair* regs) {
1615 Register temp_reg = rbx; // not part of any compiled calling seq
1616 if (VerifyOops) {
1617 for (int i = 0; i < method->size_of_parameters(); i++) {
1618 if (is_reference_type(sig_bt[i])) {
1619 VMReg r = regs[i].first();
1620 assert(r->is_valid(), "bad oop arg");
1621 if (r->is_stack()) {
1622 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1623 __ verify_oop(temp_reg);
1624 } else {
1625 __ verify_oop(r->as_Register());
1626 }
1627 }
1628 }
1629 }
1630 }
1631
1632 static void gen_special_dispatch(MacroAssembler* masm,
1633 const methodHandle& method,
1634 const BasicType* sig_bt,
1635 const VMRegPair* regs) {
1636 verify_oop_args(masm, method, sig_bt, regs);
1637 vmIntrinsics::ID iid = method->intrinsic_id();
1638
1639 // Now write the args into the outgoing interpreter space
1640 bool has_receiver = false;
1641 Register receiver_reg = noreg;
1642 int member_arg_pos = -1;
1643 Register member_reg = noreg;
1644 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1645 if (ref_kind != 0) {
1646 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1647 member_reg = rbx; // known to be free at this point
1648 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1649 } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
1650 has_receiver = true;
1651 } else {
1652 fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
1653 }
1654
1655 if (member_reg != noreg) {
1656 // Load the member_arg into register, if necessary.
1657 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1658 VMReg r = regs[member_arg_pos].first();
1659 if (r->is_stack()) {
1660 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1661 } else {
1662 // no data motion is needed
1663 member_reg = r->as_Register();
1664 }
1665 }
1666
1667 if (has_receiver) {
1668 // Make sure the receiver is loaded into a register.
1669 assert(method->size_of_parameters() > 0, "oob");
1670 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1671 VMReg r = regs[0].first();
1672 assert(r->is_valid(), "bad receiver arg");
1673 if (r->is_stack()) {
1674 // Porting note: This assumes that compiled calling conventions always
1675 // pass the receiver oop in a register. If this is not true on some
1676 // platform, pick a temp and load the receiver from stack.
1677 fatal("receiver always in a register");
1678 receiver_reg = j_rarg0; // known to be free at this point
1679 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1680 } else {
1681 // no data motion is needed
1682 receiver_reg = r->as_Register();
1683 }
1684 }
1685
1686 // Figure out which address we are really jumping to:
1687 MethodHandles::generate_method_handle_dispatch(masm, iid,
1688 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1689 }
1690
1691 // ---------------------------------------------------------------------------
1692 // Generate a native wrapper for a given method. The method takes arguments
1693 // in the Java compiled code convention, marshals them to the native
1694 // convention (handlizes oops, etc), transitions to native, makes the call,
1695 // returns to java state (possibly blocking), unhandlizes any result and
1696 // returns.
1697 //
1698 // Critical native functions are a shorthand for the use of
1699 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1700 // functions. The wrapper is expected to unpack the arguments before
1701 // passing them to the callee. Critical native functions leave the state _in_Java,
1702 // since they cannot stop for GC.
1703 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
1704 // block and the check for pending exceptions, since it's impossible for them
1705 // to be thrown.
1706 //
1707 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1708 const methodHandle& method,
1709 int compile_id,
1710 BasicType* in_sig_bt,
1711 VMRegPair* in_regs,
1712 BasicType ret_type,
1713 address critical_entry) {
1714 if (method->is_method_handle_intrinsic()) {
1715 vmIntrinsics::ID iid = method->intrinsic_id();
1716 intptr_t start = (intptr_t)__ pc();
1717 int vep_offset = ((intptr_t)__ pc()) - start;
1718 gen_special_dispatch(masm,
1719 method,
1720 in_sig_bt,
1721 in_regs);
1722 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1723 __ flush();
1724 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1725 return nmethod::new_native_nmethod(method,
1726 compile_id,
1727 masm->code(),
1728 vep_offset,
1729 frame_complete,
1730 stack_slots / VMRegImpl::slots_per_word,
1731 in_ByteSize(-1),
1732 in_ByteSize(-1),
1733 (OopMapSet*)NULL);
1734 }
1735 bool is_critical_native = true;
1736 address native_func = critical_entry;
1737 if (native_func == NULL) {
1738 native_func = method->native_function();
1739 is_critical_native = false;
1740 }
1741 assert(native_func != NULL, "must have function");
1742
1743 // An OopMap for lock (and class if static)
1744 OopMapSet *oop_maps = new OopMapSet();
1745 intptr_t start = (intptr_t)__ pc();
1746
1747 // We have received a description of where all the java args are located
1748 // on entry to the wrapper. We need to convert these args to where
1749 // the jni function will expect them. To figure out where they go
1750 // we convert the java signature to a C signature by inserting
1751 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1752
1753 const int total_in_args = method->size_of_parameters();
1754 int total_c_args = total_in_args;
1755 if (!is_critical_native) {
1756 total_c_args += 1;
1757 if (method->is_static()) {
1758 total_c_args++;
1759 }
1760 } else {
1761 for (int i = 0; i < total_in_args; i++) {
1762 if (in_sig_bt[i] == T_ARRAY) {
1763 total_c_args++;
1764 }
1765 }
1766 }
1767
1768 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1769 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1770 BasicType* in_elem_bt = NULL;
1771
1772 int argc = 0;
1773 if (!is_critical_native) {
1774 out_sig_bt[argc++] = T_ADDRESS;
1775 if (method->is_static()) {
1776 out_sig_bt[argc++] = T_OBJECT;
1777 }
1778
1779 for (int i = 0; i < total_in_args ; i++ ) {
1780 out_sig_bt[argc++] = in_sig_bt[i];
1781 }
1782 } else {
1783 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1784 SignatureStream ss(method->signature());
1785 for (int i = 0; i < total_in_args ; i++ ) {
1786 if (in_sig_bt[i] == T_ARRAY) {
1787 // Arrays are passed as int, elem* pair
1788 out_sig_bt[argc++] = T_INT;
1789 out_sig_bt[argc++] = T_ADDRESS;
1790 ss.skip_array_prefix(1); // skip one '['
1791 assert(ss.is_primitive(), "primitive type expected");
1792 in_elem_bt[i] = ss.type();
1793 } else {
1794 out_sig_bt[argc++] = in_sig_bt[i];
1795 in_elem_bt[i] = T_VOID;
1796 }
1797 if (in_sig_bt[i] != T_VOID) {
1798 assert(in_sig_bt[i] == ss.type() ||
1799 in_sig_bt[i] == T_ARRAY, "must match");
1800 ss.next();
1801 }
1802 }
1803 }
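// For illustration (hypothetical signature, not taken from the source): a non-static
// native int foo(long, Object) arrives with in_sig_bt = { T_LONG, T_VOID, T_OBJECT }; the
// regular JNI path above prepends T_ADDRESS for the JNIEnv*, giving
// out_sig_bt = { T_ADDRESS, T_LONG, T_VOID, T_OBJECT }, while the critical-native path
// would instead expand each T_ARRAY argument into a T_INT/T_ADDRESS pair.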
1804
1805 // Now figure out where the args must be stored and how much stack space
1806 // they require.
1807 int out_arg_slots;
1808 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1809
1810 // Compute framesize for the wrapper. We need to handlize all oops in
1811 // incoming registers
1812
1813 // Calculate the total number of stack slots we will need.
1814
1815 // First count the abi requirement plus all of the outgoing args
1816 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1817
1818 // Now the space for the inbound oop handle area
1819 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
1820 if (is_critical_native) {
1821 // Critical natives may have to call out so they need a save area
1822 // for register arguments.
1823 int double_slots = 0;
1824 int single_slots = 0;
1825 for ( int i = 0; i < total_in_args; i++) {
1826 if (in_regs[i].first()->is_Register()) {
1827 const Register reg = in_regs[i].first()->as_Register();
1828 switch (in_sig_bt[i]) {
1829 case T_BOOLEAN:
1830 case T_BYTE:
1831 case T_SHORT:
1832 case T_CHAR:
1833 case T_INT: single_slots++; break;
1834 case T_ARRAY: // specific to LP64 (7145024)
1835 case T_LONG: double_slots++; break;
1836 default: ShouldNotReachHere();
1837 }
1838 } else if (in_regs[i].first()->is_XMMRegister()) {
1839 switch (in_sig_bt[i]) {
1840 case T_FLOAT: single_slots++; break;
1841 case T_DOUBLE: double_slots++; break;
1842 default: ShouldNotReachHere();
1843 }
1844 } else if (in_regs[i].first()->is_FloatRegister()) {
1845 ShouldNotReachHere();
1846 }
1847 }
1848 total_save_slots = double_slots * 2 + single_slots;
1849 // align the save area
1850 if (double_slots != 0) {
1851 stack_slots = align_up(stack_slots, 2);
1852 }
1853 }
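// Rough example (hypothetical signature): a critical native whose register args are an int,
// a long and an array would count single_slots = 1 and double_slots = 2 (T_ARRAY is a double
// slot on LP64), so total_save_slots = 2 * 2 + 1 = 5 before alignment.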
1854
1855 int oop_handle_offset = stack_slots;
1856 stack_slots += total_save_slots;
1857
1858 // Now any space we need for handlizing a klass if static method
1859
1860 int klass_slot_offset = 0;
1861 int klass_offset = -1;
1862 int lock_slot_offset = 0;
1863 bool is_static = false;
1864
1865 if (method->is_static()) {
1866 klass_slot_offset = stack_slots;
1867 stack_slots += VMRegImpl::slots_per_word;
1868 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1869 is_static = true;
1870 }
1871
1872 // Plus a lock if needed
1873
1874 if (method->is_synchronized()) {
1875 lock_slot_offset = stack_slots;
1876 stack_slots += VMRegImpl::slots_per_word;
1877 }
1878
1879 // Now a place (+2) to save return values or temp during shuffling
1880 // + 4 for return address (which we own) and saved rbp
1881 stack_slots += 6;
1882
1883 // Ok The space we have allocated will look like:
1884 //
1885 //
1886 // FP-> | |
1887 // |---------------------|
1888 // | 2 slots for moves |
1889 // |---------------------|
1890 // | lock box (if sync) |
1891 // |---------------------| <- lock_slot_offset
1892 // | klass (if static) |
1893 // |---------------------| <- klass_slot_offset
1894 // | oopHandle area |
1895 // |---------------------| <- oop_handle_offset (6 java arg registers)
1896 // | outbound memory |
1897 // | based arguments |
1898 // | |
1899 // |---------------------|
1900 // | |
1901 // SP-> | out_preserved_slots |
1902 //
1903 //
1904
1905
1906 // Now compute actual number of stack words we need rounding to make
1907 // stack properly aligned.
1908 stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1909
1910 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1911
1912 // First thing make an ic check to see if we should even be here
1913
1914 // We are free to use all registers as temps without saving them and
1915 // restoring them except rbp. rbp is the only callee save register
1916 // as far as the interpreter and the compiler(s) are concerned.
1917
1918
1919 const Register ic_reg = rax;
1920 const Register receiver = j_rarg0;
1921
1922 Label hit;
1923 Label exception_pending;
1924
1925 assert_different_registers(ic_reg, receiver, rscratch1);
1926 __ verify_oop(receiver);
1927 __ load_klass(rscratch1, receiver, rscratch2);
1928 __ cmpq(ic_reg, rscratch1);
1929 __ jcc(Assembler::equal, hit);
1930
1931 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
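// Inline-cache check: the compiled caller preloads the expected klass into rax (ic_reg);
// if it does not match the receiver's actual klass (loaded into rscratch1 above) we
// tail-jump to the IC miss stub, which handles the miss.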
1932
1933 // Verified entry point must be aligned
1934 __ align(8);
1935
1936 __ bind(hit);
1937
1938 int vep_offset = ((intptr_t)__ pc()) - start;
1939
1940 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1941 Label L_skip_barrier;
1942 Register klass = r10;
1943 __ mov_metadata(klass, method->method_holder()); // InstanceKlass*
1944 __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
1945
1946 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
1947
1948 __ bind(L_skip_barrier);
1949 }
1950
1951 #ifdef COMPILER1
1952 // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
1953 if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1954 inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
1955 }
1956 #endif // COMPILER1
1957
1958 // The instruction at the verified entry point must be 5 bytes or longer
1959 // because it can be patched on the fly by make_non_entrant. The stack bang
1960 // instruction fits that requirement.
1961
1962 // Generate stack overflow check
1963
1964 if (UseStackBanging) {
1965 __ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
1966 } else {
1967 // need a 5 byte instruction to allow MT safe patching to non-entrant
1968 __ fat_nop();
1969 }
1970
1971 // Generate a new frame for the wrapper.
1972 __ enter();
1973 // -2 because return address is already present and so is saved rbp
1974 __ subptr(rsp, stack_size - 2*wordSize);
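// The 2 words subtracted here correspond to the 4 slots reserved earlier for the return
// address and saved rbp (part of the "+6" above): enter() has already materialized both,
// so only the remainder of stack_size needs to be allocated explicitly.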
1975
1976 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1977 bs->nmethod_entry_barrier(masm);
1978
1979 // Frame is now completed as far as size and linkage.
1980 int frame_complete = ((intptr_t)__ pc()) - start;
1981
1982 if (UseRTMLocking) {
1983 // Abort RTM transaction before calling JNI
1984 // because critical section will be large and will be
1985 // aborted anyway. Also nmethod could be deoptimized.
1986 __ xabort(0);
1987 }
1988
1989 #ifdef ASSERT
1990 {
1991 Label L;
1992 __ mov(rax, rsp);
1993 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
1994 __ cmpptr(rax, rsp);
1995 __ jcc(Assembler::equal, L);
1996 __ stop("improperly aligned stack");
1997 __ bind(L);
1998 }
1999 #endif /* ASSERT */
2000
2001
2002 // We use r14 as the oop handle for the receiver/klass
2003 // It is callee save so it survives the call to native
2004
2005 const Register oop_handle_reg = r14;
2006
2007 //
2008 // We immediately shuffle the arguments so that any vm call we have to
2009 // make from here on out (sync slow path, jvmti, etc.) we will have
2010 // captured the oops from our caller and have a valid oopMap for
2011 // them.
2012
2013 // -----------------
2014 // The Grand Shuffle
2015
2016 // The Java calling convention is either equal (linux) or denser (win64) than the
2017 // c calling convention. However, because of the jni_env argument the c calling
2018 // convention always has at least one more (and two for static) arguments than Java.
2019 // Therefore if we move the args from java -> c backwards then we will never have
2020 // a register->register conflict and we don't have to build a dependency graph
2021 // and figure out how to break any cycles.
2022 //
2023
2024 // Record esp-based slot for receiver on stack for non-static methods
2025 int receiver_offset = -1;
2026
2027 // This is a trick. We double the stack slots so we can claim
2028 // the oops in the caller's frame. Since we are sure to have
2029 // more args than the caller, doubling is enough to make
2030 // sure we can capture all the incoming oop args from the
2031 // caller.
2032 //
2033 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2034
2035 // Mark location of rbp (someday)
2036 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2037
2038 // Use eax, ebx as temporaries during any memory-memory moves we have to do
2039 // All inbound args are referenced based on rbp and all outbound args via rsp.
2040
2041
2042 #ifdef ASSERT
2043 bool reg_destroyed[RegisterImpl::number_of_registers];
2044 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2045 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2046 reg_destroyed[r] = false;
2047 }
2048 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2049 freg_destroyed[f] = false;
2050 }
2051
2052 #endif /* ASSERT */
2053
2054 // This may iterate in two different directions depending on the
2055 // kind of native it is. The reason is that for regular JNI natives
2056 // the incoming and outgoing registers are offset upwards and for
2057 // critical natives they are offset down.
2058 GrowableArray<int> arg_order(2 * total_in_args);
2059
2060 VMRegPair tmp_vmreg;
2061 tmp_vmreg.set2(rbx->as_VMReg());
2062
2063 if (!is_critical_native) {
2064 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2065 arg_order.push(i);
2066 arg_order.push(c_arg);
2067 }
2068 } else {
2069 // Compute a valid move order, using tmp_vmreg to break any cycles
2070 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2071 }
2072
2073 int temploc = -1;
2074 for (int ai = 0; ai < arg_order.length(); ai += 2) {
2075 int i = arg_order.at(ai);
2076 int c_arg = arg_order.at(ai + 1);
2077 __ block_comment(err_msg("move %d -> %d", i, c_arg));
2078 if (c_arg == -1) {
2079 assert(is_critical_native, "should only be required for critical natives");
2080 // This arg needs to be moved to a temporary
2081 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2082 in_regs[i] = tmp_vmreg;
2083 temploc = i;
2084 continue;
2085 } else if (i == -1) {
2086 assert(is_critical_native, "should only be required for critical natives");
2087 // Read from the temporary location
2088 assert(temploc != -1, "must be valid");
2089 i = temploc;
2090 temploc = -1;
2091 }
2092 #ifdef ASSERT
2093 if (in_regs[i].first()->is_Register()) {
2094 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2095 } else if (in_regs[i].first()->is_XMMRegister()) {
2096 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2097 }
2098 if (out_regs[c_arg].first()->is_Register()) {
2099 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2100 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2101 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2102 }
2103 #endif /* ASSERT */
2104 switch (in_sig_bt[i]) {
2105 case T_ARRAY:
2106 if (is_critical_native) {
2107 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2108 c_arg++;
2109 #ifdef ASSERT
2110 if (out_regs[c_arg].first()->is_Register()) {
2111 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2112 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2113 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2114 }
2115 #endif
2116 break;
2117 }
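// For regular JNI natives a T_ARRAY argument is just an oop, so control falls
// through to the T_OBJECT case below and the array is handlized like any other object.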
2118 case T_OBJECT:
2119 assert(!is_critical_native, "no oop arguments");
2120 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2121 ((i == 0) && (!is_static)),
2122 &receiver_offset);
2123 break;
2124 case T_VOID:
2125 break;
2126
2127 case T_FLOAT:
2128 float_move(masm, in_regs[i], out_regs[c_arg]);
2129 break;
2130
2131 case T_DOUBLE:
2132 assert( i + 1 < total_in_args &&
2133 in_sig_bt[i + 1] == T_VOID &&
2134 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2135 double_move(masm, in_regs[i], out_regs[c_arg]);
2136 break;
2137
2138 case T_LONG :
2139 long_move(masm, in_regs[i], out_regs[c_arg]);
2140 break;
2141
2142 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2143
2144 default:
2145 move32_64(masm, in_regs[i], out_regs[c_arg]);
2146 }
2147 }
2148
2149 int c_arg;
2150
2151 // Pre-load a static method's oop into r14. Used both by locking code and
2152 // the normal JNI call code.
2153 if (!is_critical_native) {
2154 // point c_arg at the first arg that is already loaded in case we
2155 // need to spill before we call out
2156 c_arg = total_c_args - total_in_args;
2157
2158 if (method->is_static()) {
2159
2160 // load oop into a register
2161 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2162
2163 // Now handlize the static class mirror; it's known to be not-null.
2164 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2165 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2166
2167 // Now get the handle
2168 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2169 // store the klass handle as second argument
2170 __ movptr(c_rarg1, oop_handle_reg);
2171 // and protect the arg if we must spill
2172 c_arg--;
2173 }
2174 } else {
2175 // For JNI critical methods we need to save all registers in save_args.
2176 c_arg = 0;
2177 }
2178
2179 // Change state to native (we save the return address in the thread, since it might not
2180 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2181 // points into the right code segment. It does not have to be the correct return pc.
2182 // We use the same pc/oopMap repeatedly when we call out
2183
2184 intptr_t the_pc = (intptr_t) __ pc();
2185 oop_maps->add_gc_map(the_pc - start, map);
2186
2187 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2188
2189
2190 // We have all of the arguments set up at this point. We must not touch any
2191 // argument registers at this point (what if we save/restore them? there are no oops).
2192
2193 {
2194 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2195 // protect the args we've loaded
2196 save_args(masm, total_c_args, c_arg, out_regs);
2197 __ mov_metadata(c_rarg1, method());
2198 __ call_VM_leaf(
2199 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2200 r15_thread, c_rarg1);
2201 restore_args(masm, total_c_args, c_arg, out_regs);
2202 }
2203
2204 // RedefineClasses() tracing support for obsolete method entry
2205 if (log_is_enabled(Trace, redefine, class, obsolete)) {
2206 // protect the args we've loaded
2207 save_args(masm, total_c_args, c_arg, out_regs);
2208 __ mov_metadata(c_rarg1, method());
2209 __ call_VM_leaf(
2210 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2211 r15_thread, c_rarg1);
2212 restore_args(masm, total_c_args, c_arg, out_regs);
2213 }
2214
2215 // Lock a synchronized method
2216
2217 // Register definitions used by locking and unlocking
2218
2219 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2220 const Register obj_reg = rbx; // Will contain the oop
2221 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2222 const Register old_hdr = r13; // value of old header at unlock time
2223
2224 Label slow_path_lock;
2225 Label lock_done;
2226
2227 if (method->is_synchronized()) {
2228 assert(!is_critical_native, "unhandled");
2229
2230
2231 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2232
2233 // Get the handle (the 2nd argument)
2234 __ mov(oop_handle_reg, c_rarg1);
2235
2236 // Get address of the box
2237
2238 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2239
2240 // Load the oop from the handle
2241 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2242
2243 __ resolve(IS_NOT_NULL, obj_reg);
2244 if (UseBiasedLocking) {
2245 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
2246 }
2247
2248 // Load immediate 1 into swap_reg %rax
2249 __ movl(swap_reg, 1);
2250
2251 // Load (object->mark() | 1) into swap_reg %rax
2252 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2253
2254 // Save (object->mark() | 1) into BasicLock's displaced header
2255 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2256
2257 // src -> dest iff dest == rax else rax <- dest
2258 __ lock();
2259 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2260 __ jcc(Assembler::equal, lock_done);
2261
2262 // Hmm should this move to the slow path code area???
2263
2264 // Test if the oopMark is an obvious stack pointer, i.e.,
2265 // 1) (mark & 3) == 0, and
2266 // 2) rsp <= mark < mark + os::pagesize()
2267 // These 3 tests can be done by evaluating the following
2268 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2269 // assuming both stack pointer and pagesize have their
2270 // least significant 2 bits clear.
2271 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2272
2273 __ subptr(swap_reg, rsp);
2274 __ andptr(swap_reg, 3 - os::vm_page_size());
2275
2276 // Save the test result, for recursive case, the result is zero
2277 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2278 __ jcc(Assembler::notEqual, slow_path_lock);
2279
2280 // Slow path will re-enter here
2281
2282 __ bind(lock_done);
2283 }
2284
2285 // Finally just about ready to make the JNI call
2286
2287 // get JNIEnv* which is first argument to native
2288 if (!is_critical_native) {
2289 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2290
2291 // Now set thread in native
2292 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2293 }
2294
2295 __ call(RuntimeAddress(native_func));
2296
2297 // Verify or restore cpu control state after JNI call
2298 __ restore_cpu_control_state_after_jni();
2299
2300 // Unpack native results.
2301 switch (ret_type) {
2302 case T_BOOLEAN: __ c2bool(rax); break;
2303 case T_CHAR : __ movzwl(rax, rax); break;
2304 case T_BYTE : __ sign_extend_byte (rax); break;
2305 case T_SHORT : __ sign_extend_short(rax); break;
2306 case T_INT : /* nothing to do */ break;
2307 case T_DOUBLE :
2308 case T_FLOAT :
2309 // Result is in xmm0 we'll save as needed
2310 break;
2311 case T_ARRAY: // Really a handle
2312 case T_OBJECT: // Really a handle
2313 break; // can't de-handlize until after safepoint check
2314 case T_VOID: break;
2315 case T_LONG: break;
2316 default : ShouldNotReachHere();
2317 }
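// Sub-word integer results are normalized here because the native calling convention does
// not guarantee the upper bits of rax for return types narrower than int.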
2318
2319 Label after_transition;
2320
2321 // If this is a critical native, check for a safepoint or suspend request after the call.
2322 // If a safepoint is needed, transition to native, then to native_trans to handle
2323 // safepoints like the native methods that are not critical natives.
2324 if (is_critical_native) {
2325 Label needs_safepoint;
2326 __ safepoint_poll(needs_safepoint, r15_thread, false /* at_return */, false /* in_nmethod */);
2327 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2328 __ jcc(Assembler::equal, after_transition);
2329 __ bind(needs_safepoint);
2330 }
2331
2332 // Switch thread to "native transition" state before reading the synchronization state.
2333 // This additional state is necessary because reading and testing the synchronization
2334 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2335 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2336 // VM thread changes sync state to synchronizing and suspends threads for GC.
2337 // Thread A is resumed to finish this native method, but doesn't block here since it
2338 // didn't see any synchronization in progress, and escapes.
2339 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2340
2341 // Force this write out before the read below
2342 __ membar(Assembler::Membar_mask_bits(
2343 Assembler::LoadLoad | Assembler::LoadStore |
2344 Assembler::StoreLoad | Assembler::StoreStore));
2345
2346 // check for safepoint operation in progress and/or pending suspend requests
2347 {
2348 Label Continue;
2349 Label slow_path;
2350
2351 __ safepoint_poll(slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
2352
2353 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2354 __ jcc(Assembler::equal, Continue);
2355 __ bind(slow_path);
2356
2357 // Don't use call_VM as it will see a possible pending exception and forward it
2358 // and never return here preventing us from clearing _last_native_pc down below.
2359 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2360 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2361 // by hand.
2362 //
2363 __ vzeroupper();
2364 save_native_result(masm, ret_type, stack_slots);
2365 __ mov(c_rarg0, r15_thread);
2366 __ mov(r12, rsp); // remember sp
2367 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2368 __ andptr(rsp, -16); // align stack as required by ABI
2369 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2370 __ mov(rsp, r12); // restore sp
2371 __ reinit_heapbase();
2372 // Restore any method result value
2373 restore_native_result(masm, ret_type, stack_slots);
2374 __ bind(Continue);
2375 }
2376
2377 // change thread state
2378 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2379 __ bind(after_transition);
2380
2381 Label reguard;
2382 Label reguard_done;
2383 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
2384 __ jcc(Assembler::equal, reguard);
2385 __ bind(reguard_done);
2386
2387 // native result if any is live
2388
2389 // Unlock
2390 Label unlock_done;
2391 Label slow_path_unlock;
2392 if (method->is_synchronized()) {
2393
2394 // Get locked oop from the handle we passed to jni
2395 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2396 __ resolve(IS_NOT_NULL, obj_reg);
2397
2398 Label done;
2399
2400 if (UseBiasedLocking) {
2401 __ biased_locking_exit(obj_reg, old_hdr, done);
2402 }
2403
2404 // Simple recursive lock?
2405
2406 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2407 __ jcc(Assembler::equal, done);
2408
2409 // Must save rax if it is live now because cmpxchg must use it
2410 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2411 save_native_result(masm, ret_type, stack_slots);
2412 }
2413
2414
2415 // get address of the stack lock
2416 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2417 // get old displaced header
2418 __ movptr(old_hdr, Address(rax, 0));
2419
2420 // Atomic swap old header if oop still contains the stack lock
2421 __ lock();
2422 __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2423 __ jcc(Assembler::notEqual, slow_path_unlock);
2424
2425 // slow path re-enters here
2426 __ bind(unlock_done);
2427 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2428 restore_native_result(masm, ret_type, stack_slots);
2429 }
2430
2431 __ bind(done);
2432
2433 }
2434 {
2435 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2436 save_native_result(masm, ret_type, stack_slots);
2437 __ mov_metadata(c_rarg1, method());
2438 __ call_VM_leaf(
2439 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2440 r15_thread, c_rarg1);
2441 restore_native_result(masm, ret_type, stack_slots);
2442 }
2443
2444 __ reset_last_Java_frame(false);
2445
2446 // Unbox oop result, e.g. JNIHandles::resolve value.
2447 if (is_reference_type(ret_type)) {
2448 __ resolve_jobject(rax /* value */,
2449 r15_thread /* thread */,
2450 rcx /* tmp */);
2451 }
2452
2453 if (CheckJNICalls) {
2454 // clear_pending_jni_exception_check
2455 __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
2456 }
2457
2458 if (!is_critical_native) {
2459 // reset handle block
2460 __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2461 __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2462 }
2463
2464 // pop our frame
2465
2466 __ leave();
2467
2468 if (!is_critical_native) {
2469 // Any exception pending?
2470 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2471 __ jcc(Assembler::notEqual, exception_pending);
2472 }
2473
2474 // Return
2475
2476 __ ret(0);
2477
2478 // Unexpected paths are out of line and go here
2479
2480 if (!is_critical_native) {
2481 // forward the exception
2482 __ bind(exception_pending);
2483
2484 // and forward the exception
2485 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2486 }
2487
2488 // Slow path locking & unlocking
2489 if (method->is_synchronized()) {
2490
2491 // BEGIN Slow path lock
2492 __ bind(slow_path_lock);
2493
2494 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2495 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2496
2497 // protect the args we've loaded
2498 save_args(masm, total_c_args, c_arg, out_regs);
2499
2500 __ mov(c_rarg0, obj_reg);
2501 __ mov(c_rarg1, lock_reg);
2502 __ mov(c_rarg2, r15_thread);
2503
2504 // Not a leaf but we have last_Java_frame setup as we want
2505 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2506 restore_args(masm, total_c_args, c_arg, out_regs);
2507
2508 #ifdef ASSERT
2509 { Label L;
2510 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2511 __ jcc(Assembler::equal, L);
2512 __ stop("no pending exception allowed on exit from monitorenter");
2513 __ bind(L);
2514 }
2515 #endif
2516 __ jmp(lock_done);
2517
2518 // END Slow path lock
2519
2520 // BEGIN Slow path unlock
2521 __ bind(slow_path_unlock);
2522
2523 // If we haven't already saved the native result we must save it now as xmm registers
2524 // are still exposed.
2525 __ vzeroupper();
2526 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2527 save_native_result(masm, ret_type, stack_slots);
2528 }
2529
2530 __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2531
2532 __ mov(c_rarg0, obj_reg);
2533 __ mov(c_rarg2, r15_thread);
2534 __ mov(r12, rsp); // remember sp
2535 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2536 __ andptr(rsp, -16); // align stack as required by ABI
2537
2538 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2539 // NOTE that obj_reg == rbx currently
2540 __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2541 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2542
2543 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2544 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2545 __ mov(rsp, r12); // restore sp
2546 __ reinit_heapbase();
2547 #ifdef ASSERT
2548 {
2549 Label L;
2550 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2551 __ jcc(Assembler::equal, L);
2552 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2553 __ bind(L);
2554 }
2555 #endif /* ASSERT */
2556
2557 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2558
2559 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2560 restore_native_result(masm, ret_type, stack_slots);
2561 }
2562 __ jmp(unlock_done);
2563
2564 // END Slow path unlock
2565
2566 } // synchronized
2567
2568 // SLOW PATH Reguard the stack if needed
2569
2570 __ bind(reguard);
2571 __ vzeroupper();
2572 save_native_result(masm, ret_type, stack_slots);
2573 __ mov(r12, rsp); // remember sp
2574 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2575 __ andptr(rsp, -16); // align stack as required by ABI
2576 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2577 __ mov(rsp, r12); // restore sp
2578 __ reinit_heapbase();
2579 restore_native_result(masm, ret_type, stack_slots);
2580 // and continue
2581 __ jmp(reguard_done);
2582
2583
2584
2585 __ flush();
2586
2587 nmethod *nm = nmethod::new_native_nmethod(method,
2588 compile_id,
2589 masm->code(),
2590 vep_offset,
2591 frame_complete,
2592 stack_slots / VMRegImpl::slots_per_word,
2593 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2594 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2595 oop_maps);
2596
2597 return nm;
2598 }
2599
2600 // this function returns the adjustment size (in number of words) to a c2i adapter
2601 // activation for use during deoptimization
2602 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2603 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2604 }
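// For example, deoptimizing into a callee with 2 parameters and 5 locals yields an
// adjustment of (5 - 2) * Interpreter::stackElementWords extra words on the c2i activation.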
2605
2606
2607 uint SharedRuntime::out_preserve_stack_slots() {
2608 return 0;
2609 }
2610
2611
2612 // Number of stack slots between incoming argument block and the start of
2613 // a new frame. The PROLOG must add this many slots to the stack. The
2614 // EPILOG must remove this many slots. amd64 needs two slots for
2615 // return address.
2616 uint SharedRuntime::in_preserve_stack_slots() {
2617 return 4 + 2 * VerifyStackAtCalls;
2618 }
2619
2620 //------------------------------generate_deopt_blob----------------------------
2621 void SharedRuntime::generate_deopt_blob() {
2622 // Allocate space for the code
2623 ResourceMark rm;
2624 // Setup code generation tools
2625 int pad = 0;
2626 if (UseAVX > 2) {
2627 pad += 1024;
2628 }
2629 #if INCLUDE_JVMCI
2630 if (EnableJVMCI || UseAOT) {
2631 pad += 512; // Increase the buffer size when compiling for JVMCI
2632 }
2633 #endif
2634 CodeBuffer buffer("deopt_blob", 2560+pad, 1024);
2635 MacroAssembler* masm = new MacroAssembler(&buffer);
2636 int frame_size_in_words;
2637 OopMap* map = NULL;
2638 OopMapSet *oop_maps = new OopMapSet();
2639
2640 // -------------
2641 // This code enters when returning to a de-optimized nmethod. A return
2642 // address has been pushed on the stack, and return values are in
2643 // registers.
2644 // If we are doing a normal deopt then we were called from the patched
2645 // nmethod from the point we returned to the nmethod. So the return
2646 // address on the stack is wrong by NativeCall::instruction_size
2647 // We will adjust the value so it looks like we have the original return
2648 // address on the stack (like when we eagerly deoptimized).
2649 // In the case of an exception pending when deoptimizing, we enter
2650 // with a return address on the stack that points after the call we patched
2651 // into the exception handler. We have the following register state from,
2652 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2653 // rax: exception oop
2654 // rbx: exception handler
2655 // rdx: throwing pc
2656 // So in this case we simply jam rdx into the useless return address and
2657 // the stack looks just like we want.
2658 //
2659 // At this point we need to de-opt. We save the argument return
2660 // registers. We call the first C routine, fetch_unroll_info(). This
2661 // routine captures the return values and returns a structure which
2662 // describes the current frame size and the sizes of all replacement frames.
2663 // The current frame is compiled code and may contain many inlined
2664 // functions, each with their own JVM state. We pop the current frame, then
2665 // push all the new frames. Then we call the C routine unpack_frames() to
2666 // populate these frames. Finally unpack_frames() returns us the new target
2667 // address. Notice that callee-save registers are BLOWN here; they have
2668 // already been captured in the vframeArray at the time the return PC was
2669 // patched.
2670 address start = __ pc();
2671 Label cont;
2672
2673 // Prolog for non exception case!
2674
2675 // Save everything in sight.
2676 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ true);
2677
2678 // Normal deoptimization. Save exec mode for unpack_frames.
2679 __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
2680 __ jmp(cont);
2681
2682 int reexecute_offset = __ pc() - start;
2683 #if INCLUDE_JVMCI && !defined(COMPILER1)
2684 if (EnableJVMCI && UseJVMCICompiler) {
2685 // JVMCI does not use this kind of deoptimization
2686 __ should_not_reach_here();
2687 }
2688 #endif
2689
2690 // Reexecute case
2691 // The return address is the pc that describes what bci to re-execute at
2692
2693 // No need to update map as each call to save_live_registers will produce identical oopmap
2694 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ true);
2695
2696 __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
2697 __ jmp(cont);
2698
2699 #if INCLUDE_JVMCI
2700 Label after_fetch_unroll_info_call;
2701 int implicit_exception_uncommon_trap_offset = 0;
2702 int uncommon_trap_offset = 0;
2703
2704 if (EnableJVMCI || UseAOT) {
2705 implicit_exception_uncommon_trap_offset = __ pc() - start;
2706
2707 __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2708 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD);
2709
2710 uncommon_trap_offset = __ pc() - start;
2711
2712 // Save everything in sight.
2713 RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ true);
2714 // fetch_unroll_info needs to call last_java_frame()
2715 __ set_last_Java_frame(noreg, noreg, NULL);
2716
2717 __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
2718 __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
2719
2720 __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
2721 __ mov(c_rarg0, r15_thread);
2722 __ movl(c_rarg2, r14); // exec mode
2723 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2724 oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2725
2726 __ reset_last_Java_frame(false);
2727
2728 __ jmp(after_fetch_unroll_info_call);
2729 } // EnableJVMCI
2730 #endif // INCLUDE_JVMCI
2731
2732 int exception_offset = __ pc() - start;
2733
2734 // Prolog for exception case
2735
2736 // all registers are dead at this entry point, except for rax, and
2737 // rdx which contain the exception oop and exception pc
2738 // respectively. Set them in TLS and fall thru to the
2739 // unpack_with_exception_in_tls entry point.
2740
2741 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
2742 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
2743
2744 int exception_in_tls_offset = __ pc() - start;
2745
2746 // new implementation because exception oop is now passed in JavaThread
2747
2748 // Prolog for exception case
2749 // All registers must be preserved because they might be used by LinearScan
2750 // Exception oop and throwing PC are passed in JavaThread
2751 // tos: stack at point of call to method that threw the exception (i.e. only
2752 // args are on the stack, no return address)
2753
2754 // make room on stack for the return address
2755 // It will be patched later with the throwing pc. The correct value is not
2756 // available now because loading it from memory would destroy registers.
2757 __ push(0);
2758
2759 // Save everything in sight.
2760 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ true);
2761
2762 // Now it is safe to overwrite any register
2763
2764 // Deopt during an exception. Save exec mode for unpack_frames.
2765 __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
2766
2767 // load throwing pc from JavaThread and patch it as the return address
2768 // of the current frame. Then clear the field in JavaThread
2769
2770 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
2771 __ movptr(Address(rbp, wordSize), rdx);
2772 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
2773
2774 #ifdef ASSERT
2775 // verify that there is really an exception oop in JavaThread
2776 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
2777 __ verify_oop(rax);
2778
2779 // verify that there is no pending exception
2780 Label no_pending_exception;
2781 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
2782 __ testptr(rax, rax);
2783 __ jcc(Assembler::zero, no_pending_exception);
2784 __ stop("must not have pending exception here");
2785 __ bind(no_pending_exception);
2786 #endif
2787
2788 __ bind(cont);
2789
2790 // Call C code. Need thread and this frame, but NOT official VM entry
2791 // crud. We cannot block on this call, no GC can happen.
2792 //
2793 // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2794
2795 // fetch_unroll_info needs to call last_java_frame().
2796
2797 __ set_last_Java_frame(noreg, noreg, NULL);
2798 #ifdef ASSERT
2799 { Label L;
2800 __ cmpptr(Address(r15_thread,
2801 JavaThread::last_Java_fp_offset()),
2802 (int32_t)0);
2803 __ jcc(Assembler::equal, L);
2804 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2805 __ bind(L);
2806 }
2807 #endif // ASSERT
2808 __ mov(c_rarg0, r15_thread);
2809 __ movl(c_rarg1, r14); // exec_mode
2810 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2811
2812 // Need to have an oopmap that tells fetch_unroll_info where to
2813 // find any register it might need.
2814 oop_maps->add_gc_map(__ pc() - start, map);
2815
2816 __ reset_last_Java_frame(false);
2817
2818 #if INCLUDE_JVMCI
2819 if (EnableJVMCI || UseAOT) {
2820 __ bind(after_fetch_unroll_info_call);
2821 }
2822 #endif
2823
2824 // Load UnrollBlock* into rdi
2825 __ mov(rdi, rax);
2826
2827 __ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2828 Label noException;
2829 __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
2830 __ jcc(Assembler::notEqual, noException);
2831 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
2832 // QQQ this is useless it was NULL above
2833 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
2834 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
2835 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
2836
2837 __ verify_oop(rax);
2838
2839 // Overwrite the result registers with the exception results.
2840 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
2841 // I think this is useless
2842 __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
2843
2844 __ bind(noException);
2845
2846 // Only register save data is on the stack.
2847 // Now restore the result registers. Everything else is either dead
2848 // or captured in the vframeArray.
2849 RegisterSaver::restore_result_registers(masm);
2850
2851 // All of the register save area has been popped off the stack. Only the
2852 // return address remains.
2853
2854 // Pop all the frames we must move/replace.
2855 //
2856 // Frame picture (youngest to oldest)
2857 // 1: self-frame (no frame link)
2858 // 2: deopting frame (no frame link)
2859 // 3: caller of deopting frame (could be compiled/interpreted).
2860 //
2861 // Note: by leaving the return address of self-frame on the stack
2862 // and using the size of frame 2 to adjust the stack
2863 // when we are done the return to frame 3 will still be on the stack.
2864
2865 // Pop deoptimized frame
2866 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2867 __ addptr(rsp, rcx);
2868
2869 // rsp should be pointing at the return address to the caller (3)
2870
2871 // Pick up the initial fp we should save
2872 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
2873 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
2874
2875 #ifdef ASSERT
2876 // Compilers generate code that bangs the stack by as much as the
2877 // interpreter would need. So this stack banging should never
2878 // trigger a fault. Verify that it does not on non product builds.
2879 if (UseStackBanging) {
2880 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2881 __ bang_stack_size(rbx, rcx);
2882 }
2883 #endif
2884
2885 // Load address of array of frame pcs into rcx
2886 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2887
2888 // Trash the old pc
2889 __ addptr(rsp, wordSize);
2890
2891 // Load address of array of frame sizes into rsi
2892 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2893
2894 // Load counter into rdx
2895 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2896
2897 // Now adjust the caller's stack to make up for the extra locals
2898 // but record the original sp so that we can save it in the skeletal interpreter
2899 // frame and the stack walking of interpreter_sender will get the unextended sp
2900 // value and not the "real" sp value.
2901
2902 const Register sender_sp = r8;
2903
2904 __ mov(sender_sp, rsp);
2905 __ movl(rbx, Address(rdi,
2906 Deoptimization::UnrollBlock::
2907 caller_adjustment_offset_in_bytes()));
2908 __ subptr(rsp, rbx);
2909
2910 // Push interpreter frames in a loop
2911 Label loop;
2912 __ bind(loop);
2913 __ movptr(rbx, Address(rsi, 0)); // Load frame size
2914 __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand
2915 __ pushptr(Address(rcx, 0)); // Save return address
2916 __ enter(); // Save old & set new ebp
2917 __ subptr(rsp, rbx); // Prolog
2918 // This value is corrected by layout_activation_impl
2919 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
2920 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
2921 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
2922 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
2923 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
2924 __ decrementl(rdx); // Decrement counter
2925 __ jcc(Assembler::notZero, loop);
2926 __ pushptr(Address(rcx, 0)); // Save final return address
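  // The loop above has built one skeletal interpreter frame per virtual
  // frame being unpacked: each has its return pc and saved rbp in place
  // plus frame_size - 2 words that are still uninitialized.  The final
  // pushptr leaves the return address for the self-frame that is re-pushed
  // below; Deoptimization::unpack_frames() later fills the skeletal frames
  // with the actual interpreter state.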
2927
2928 // Re-push self-frame
2929 __ enter(); // Save old & set new ebp
2930
2931 // Allocate a full sized register save area.
2932 // Return address and rbp are in place, so we allocate two fewer words.
2933 __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
2934
2935 // Restore frame locals after moving the frame
2936 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
2937 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
2938
2939 // Call C code. Need thread but NOT official VM entry
2940 // crud. We cannot block on this call, no GC can happen. Call should
2941 // restore return values to their stack-slots with the new SP.
2942 //
2943 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2944
2945 // Use rbp because the frames look interpreted now
2946 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
2947 // Don't need the precise return PC here, just precise enough to point into this code blob.
2948 address the_pc = __ pc();
2949 __ set_last_Java_frame(noreg, rbp, the_pc);
2950
2951 __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI
2952 __ mov(c_rarg0, r15_thread);
2953 __ movl(c_rarg1, r14); // second arg: exec_mode
2954 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2955 // Revert SP alignment after call since we're going to do some SP relative addressing below
2956 __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
2957
2958 // Set an oopmap for the call site
2959 // Use the same PC we used for the last java frame
2960 oop_maps->add_gc_map(the_pc - start,
2961 new OopMap( frame_size_in_words, 0 ));
2962
2963 // Clear fp AND pc
2964 __ reset_last_Java_frame(true);
2965
2966 // Collect return values
2967 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
2968 __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
2969 // I think this is useless (throwing pc?)
2970 __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
2971
2972 // Pop self-frame.
2973 __ leave(); // Epilog
2974
2975 // Jump to interpreter
2976 __ ret(0);
2977
2978 // Make sure all code is generated
2979 masm->flush();
2980
2981 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2982 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2983 #if INCLUDE_JVMCI
2984 if (EnableJVMCI || UseAOT) {
2985 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2986 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2987 }
2988 #endif
2989 }
2990
2991 #ifdef COMPILER2
2992 //------------------------------generate_uncommon_trap_blob--------------------
2993 void SharedRuntime::generate_uncommon_trap_blob() {
2994 // Allocate space for the code
2995 ResourceMark rm;
2996 // Setup code generation tools
2997 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2998 MacroAssembler* masm = new MacroAssembler(&buffer);
2999
3000 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3001
3002 address start = __ pc();
3003
3004 if (UseRTMLocking) {
3005 // Abort RTM transaction before possible nmethod deoptimization.
3006 __ xabort(0);
3007 }
3008
3009 // Push self-frame. We get here with a return address on the
3010 // stack, so rsp is 8-byte aligned until we allocate our frame.
3011 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!
3012
3013 // No callee saved registers. rbp is assumed implicitly saved
3014 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3015
3016 // The compiler left unloaded_class_index in j_rarg0; move it to where the
3017 // runtime expects it.
3018 __ movl(c_rarg1, j_rarg0);
3019
3020 __ set_last_Java_frame(noreg, noreg, NULL);
3021
3022 // Call C code. Need thread but NOT official VM entry
3023 // crud. We cannot block on this call, no GC can happen. Call should
3024 // capture callee-saved registers as well as return values.
3025 // Thread is in rdi already.
3026 //
3027 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3028
3029 __ mov(c_rarg0, r15_thread);
3030 __ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
3031 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3032
3033 // Set an oopmap for the call site
3034 OopMapSet* oop_maps = new OopMapSet();
3035 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3036
3037 // location of rbp is known implicitly by the frame sender code
3038
3039 oop_maps->add_gc_map(__ pc() - start, map);
3040
3041 __ reset_last_Java_frame(false);
3042
3043 // Load UnrollBlock* into rdi
3044 __ mov(rdi, rax);
3045
3046 #ifdef ASSERT
3047 { Label L;
3048 __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
3049 (int32_t)Deoptimization::Unpack_uncommon_trap);
3050 __ jcc(Assembler::equal, L);
3051 __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
3052 __ bind(L);
3053 }
3054 #endif
3055
3056 // Pop all the frames we must move/replace.
3057 //
3058 // Frame picture (youngest to oldest)
3059 // 1: self-frame (no frame link)
3060 // 2: deopting frame (no frame link)
3061 // 3: caller of deopting frame (could be compiled/interpreted).
3062
3063 // Pop self-frame. We have no frame, and must rely only on rax and rsp.
3064 __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3065
3066 // Pop deoptimized frame (int)
3067 __ movl(rcx, Address(rdi,
3068 Deoptimization::UnrollBlock::
3069 size_of_deoptimized_frame_offset_in_bytes()));
3070 __ addptr(rsp, rcx);
3071
3072 // rsp should be pointing at the return address to the caller (3)
3073
3074 // Pick up the initial fp we should save
3075 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3076 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3077
3078 #ifdef ASSERT
3079 // Compilers generate code that bangs the stack by as much as the
3080 // interpreter would need. So this stack banging should never
3081 // trigger a fault. Verify that it does not on non-product builds.
3082 if (UseStackBanging) {
3083 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3084 __ bang_stack_size(rbx, rcx);
3085 }
3086 #endif
3087
3088 // Load address of array of frame pcs into rcx (address*)
3089 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3090
3091 // Trash the return pc
3092 __ addptr(rsp, wordSize);
3093
3094 // Load address of array of frame sizes into rsi (intptr_t*)
3095 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3096
3097 // Counter
3098 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); // (int)
3099
3100 // Now adjust the caller's stack to make up for the extra locals but
3101 // record the original sp so that we can save it in the skeletal
3102 // interpreter frame and the stack walking of interpreter_sender
3103 // will get the unextended sp value and not the "real" sp value.
3104
3105 const Register sender_sp = r8;
3106
3107 __ mov(sender_sp, rsp);
3108 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); // (int)
3109 __ subptr(rsp, rbx);
3110
3111 // Push interpreter frames in a loop
3112 Label loop;
3113 __ bind(loop);
3114 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3115 __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
3116 __ pushptr(Address(rcx, 0)); // Save return address
3117 __ enter(); // Save old & set new rbp
3118 __ subptr(rsp, rbx); // Prolog
3119 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3120 sender_sp); // Make it walkable
3121 // This value is corrected by layout_activation_impl
3122 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3123 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3124 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3125 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3126 __ decrementl(rdx); // Decrement counter
3127 __ jcc(Assembler::notZero, loop);
3128 __ pushptr(Address(rcx, 0)); // Save final return address
3129
3130 // Re-push self-frame
3131 __ enter(); // Save old & set new rbp
3132 __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3133 // Prolog
3134
3135 // Use rbp because the frames look interpreted now
3136 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3137 // Don't need the precise return PC here, just precise enough to point into this code blob.
3138 address the_pc = __ pc();
3139 __ set_last_Java_frame(noreg, rbp, the_pc);
3140
3141 // Call C code. Need thread but NOT official VM entry
3142 // crud. We cannot block on this call, no GC can happen. Call should
3143 // restore return values to their stack-slots with the new SP.
3144 // Thread is in rdi already.
3145 //
3146 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3147
3148 __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3149 __ mov(c_rarg0, r15_thread);
3150 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3151 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3152
3153 // Set an oopmap for the call site
3154 // Use the same PC we used for the last java frame
3155 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3156
3157 // Clear fp AND pc
3158 __ reset_last_Java_frame(true);
3159
3160 // Pop self-frame.
3161 __ leave(); // Epilog
3162
3163 // Jump to interpreter
3164 __ ret(0);
3165
3166 // Make sure all code is generated
3167 masm->flush();
3168
3169 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3170 SimpleRuntimeFrame::framesize >> 1);
3171 }
3172 #endif // COMPILER2
3173
3174
3175 //------------------------------generate_handler_blob------
3176 //
3177 // Generate a special Compile2Runtime blob that saves all registers,
3178 // and setup oopmap.
3179 //
3180 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3181 assert(StubRoutines::forward_exception_entry() != NULL,
3182 "must be generated before");
3183
3184 ResourceMark rm;
3185 OopMapSet *oop_maps = new OopMapSet();
3186 OopMap* map;
3187
3188 // Allocate space for the code. Setup code generation tools.
3189 CodeBuffer buffer("handler_blob", 2048, 1024);
3190 MacroAssembler* masm = new MacroAssembler(&buffer);
3191
3192 address start = __ pc();
3193 address call_pc = NULL;
3194 int frame_size_in_words;
3195 bool cause_return = (poll_type == POLL_AT_RETURN);
3196 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3197
3198 if (UseRTMLocking) {
3199 // Abort RTM transaction before calling runtime
3200 // because critical section will be large and will be
3201 // aborted anyway. Also nmethod could be deoptimized.
3202 __ xabort(0);
3203 }
3204
3205 // Make room for return address (or push it again)
3206 if (!cause_return) {
3207 __ push(rbx);
3208 }
3209
3210 // Save registers, fpu state, and flags
3211 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3212
3213 // The following is basically a call_VM. However, we need the precise
3214 // address of the call in order to generate an oopmap. Hence, we do all the
3215 // work ourselves.
3216
3217 __ set_last_Java_frame(noreg, noreg, NULL);
3218
3219 // The return address must always be correct so that frame constructor never
3220 // sees an invalid pc.
3221
3222 if (!cause_return) {
3223 // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
3224 // Additionally, rbx is a callee saved register and we can look at it later to determine
3225 // if someone changed the return address for us!
3226 __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3227 __ movptr(Address(rbp, wordSize), rbx);
3228 }
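  // save_live_registers() set up rbp with an enter(), so Address(rbp, wordSize)
  // is this frame's return-address slot (currently holding the rbx pushed
  // above).  Storing the pc saved by the signal handler there makes the stub
  // return to the interrupted poll site (possibly adjusted further below).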
3229
3230 // Do the call
3231 __ mov(c_rarg0, r15_thread);
3232 __ call(RuntimeAddress(call_ptr));
3233
3234 // Set an oopmap for the call site. This oopmap will map all
3235 // oop-registers and debug-info registers as callee-saved. This
3236 // will allow deoptimization at this safepoint to find all possible
3237 // debug-info recordings, as well as let GC find all oops.
3238
3239 oop_maps->add_gc_map( __ pc() - start, map);
3240
3241 Label noException;
3242
3243 __ reset_last_Java_frame(false);
3244
3245 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3246 __ jcc(Assembler::equal, noException);
3247
3248 // Exception pending
3249
3250 RegisterSaver::restore_live_registers(masm, save_vectors);
3251
3252 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3253
3254 // No exception case
3255 __ bind(noException);
3256
3257 Label no_adjust;
3258 #ifdef ASSERT
3259 Label bail;
3260 #endif
3261 if (!cause_return) {
3262 Label no_prefix, not_special;
3263
3264 // If our stashed return pc was modified by the runtime we avoid touching it
3265 __ cmpptr(rbx, Address(rbp, wordSize));
3266 __ jccb(Assembler::notEqual, no_adjust);
3267
3268 // Skip over the poll instruction.
3269 // See NativeInstruction::is_safepoint_poll()
3270 // Possible encodings:
3271 // 85 00 test %eax,(%rax)
3272 // 85 01 test %eax,(%rcx)
3273 // 85 02 test %eax,(%rdx)
3274 // 85 03 test %eax,(%rbx)
3275 // 85 06 test %eax,(%rsi)
3276 // 85 07 test %eax,(%rdi)
3277 //
3278 // 41 85 00 test %eax,(%r8)
3279 // 41 85 01 test %eax,(%r9)
3280 // 41 85 02 test %eax,(%r10)
3281 // 41 85 03 test %eax,(%r11)
3282 // 41 85 06 test %eax,(%r14)
3283 // 41 85 07 test %eax,(%r15)
3284 //
3285 // 85 04 24 test %eax,(%rsp)
3286 // 41 85 04 24 test %eax,(%r12)
3287 // 85 45 00 test %eax,0x0(%rbp)
3288 // 41 85 45 00 test %eax,0x0(%r13)
3289
3290 __ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
3291 __ jcc(Assembler::notEqual, no_prefix);
3292 __ addptr(rbx, 1);
3293 __ bind(no_prefix);
3294 #ifdef ASSERT
3295 __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
3296 #endif
3297 // r12/r13/rsp/rbp base encoding takes 3 bytes with the following register values:
3298 // r12/rsp 0x04
3299 // r13/rbp 0x05
3300 __ movzbq(rcx, Address(rbx, 1));
3301 __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
3302 __ subptr(rcx, 4); // looking for 0x00 .. 0x01
3303 __ cmpptr(rcx, 1);
3304 __ jcc(Assembler::above, not_special);
3305 __ addptr(rbx, 1);
3306 __ bind(not_special);
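    // A base register of rsp/r12 (low modrm bits 0x04) requires a SIB byte and
    // rbp/r13 (0x05) requires a disp8, so those forms are one byte longer than
    // the plain two-byte (three-byte with REX prefix) test encoding; the extra
    // byte is skipped here.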
3307 #ifdef ASSERT
3308 // Verify the correct encoding of the poll we're about to skip.
3309 __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
3310 __ jcc(Assembler::notEqual, bail);
3311 // Mask out the modrm bits
3312 __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
3313 // rax encodes to 0, so if the bits are nonzero it's incorrect
3314 __ jcc(Assembler::notZero, bail);
3315 #endif
3316 // Adjust return pc forward to step over the safepoint poll instruction
3317 __ addptr(rbx, 2);
3318 __ movptr(Address(rbp, wordSize), rbx);
3319 }
3320
3321 __ bind(no_adjust);
3322 // Normal exit, restore registers and exit.
3323 RegisterSaver::restore_live_registers(masm, save_vectors);
3324 __ ret(0);
3325
3326 #ifdef ASSERT
3327 __ bind(bail);
3328 __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
3329 #endif
3330
3331 // Make sure all code is generated
3332 masm->flush();
3333
3334 // Fill-out other meta info
3335 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3336 }
3337
3338 //
3339 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3340 //
3341 // Generate a stub that calls into vm to find out the proper destination
3342 // of a java call. All the argument registers are live at this point
3343 // but since this is generic code we don't know what they are and the caller
3344 // must do any gc of the args.
3345 //
3346 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3347 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3348
3349 // allocate space for the code
3350 ResourceMark rm;
3351
3352 CodeBuffer buffer(name, 1000, 512);
3353 MacroAssembler* masm = new MacroAssembler(&buffer);
3354
3355 int frame_size_in_words;
3356
3357 OopMapSet *oop_maps = new OopMapSet();
3358 OopMap* map = NULL;
3359
3360 int start = __ offset();
3361
3362 // No need to save vector registers since they are caller-saved anyway.
3363 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, /*save_vectors*/ false);
3364
3365 int frame_complete = __ offset();
3366
3367 __ set_last_Java_frame(noreg, noreg, NULL);
3368
3369 __ mov(c_rarg0, r15_thread);
3370
3371 __ call(RuntimeAddress(destination));
3372
3373
3374 // Set an oopmap for the call site.
3375 // We need this not only for callee-saved registers, but also for volatile
3376 // registers that the compiler might be keeping live across a safepoint.
3377
3378 oop_maps->add_gc_map( __ offset() - start, map);
3379
3380 // rax contains the address we are going to jump to assuming no exception got installed
3381
3382 // clear last_Java_sp
3383 __ reset_last_Java_frame(false);
3384 // check for pending exceptions
3385 Label pending;
3386 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3387 __ jcc(Assembler::notEqual, pending);
3388
3389 // get the returned Method*
3390 __ get_vm_result_2(rbx, r15_thread);
3391 __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3392
3393 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
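  // Patching rbx (the resolved Method*) and rax (the code entry point) into
  // their slots in the register save area means restore_live_registers()
  // below reloads them together with the still-live argument registers, so
  // the jmp through rax happens with the original call state intact.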
3394
3395 RegisterSaver::restore_live_registers(masm);
3396
3397 // We are back to the original state on entry and ready to go.
3398
3399 __ jmp(rax);
3400
3401 // Pending exception after the safepoint
3402
3403 __ bind(pending);
3404
3405 RegisterSaver::restore_live_registers(masm);
3406
3407 // exception pending => remove activation and forward to exception handler
3408
3409 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3410
3411 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3412 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3413
3414 // -------------
3415 // make sure all code is generated
3416 masm->flush();
3417
3418 // return the blob
3419 // frame_size_words or bytes??
3420 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3421 }
3422
3423 static const int native_invoker_code_size = MethodHandles::adapter_code_size;
3424
3425 class NativeInvokerGenerator : public StubCodeGenerator {
3426 address _call_target;
3427 int _shadow_space_bytes;
3428
3429 const GrowableArray<VMReg>& _input_registers;
3430 const GrowableArray<VMReg>& _output_registers;
3431 public:
3432 NativeInvokerGenerator(CodeBuffer* buffer,
3433 address call_target,
3434 int shadow_space_bytes,
3435 const GrowableArray<VMReg>& input_registers,
3436 const GrowableArray<VMReg>& output_registers)
3437 : StubCodeGenerator(buffer, PrintMethodHandleStubs),
3438 _call_target(call_target),
3439 _shadow_space_bytes(shadow_space_bytes),
3440 _input_registers(input_registers),
3441 _output_registers(output_registers) {}
3442 void generate();
3443
3444 void spill_register(VMReg reg) {
3445 assert(reg->is_reg(), "must be a register");
3446 MacroAssembler* masm = _masm;
3447 if (reg->is_Register()) {
3448 __ push(reg->as_Register());
3449 } else if (reg->is_XMMRegister()) {
3450 if (UseAVX >= 3) {
3451 __ subptr(rsp, 64); // bytes
3452 __ evmovdqul(Address(rsp, 0), reg->as_XMMRegister(), Assembler::AVX_512bit);
3453 } else if (UseAVX >= 1) {
3454 __ subptr(rsp, 32);
3455 __ vmovdqu(Address(rsp, 0), reg->as_XMMRegister());
3456 } else {
3457 __ subptr(rsp, 16);
3458 __ movdqu(Address(rsp, 0), reg->as_XMMRegister());
3459 }
3460 } else {
3461 ShouldNotReachHere();
3462 }
3463 }
3464
3465 void fill_register(VMReg reg) {
3466 assert(reg->is_reg(), "must be a register");
3467 MacroAssembler* masm = _masm;
3468 if (reg->is_Register()) {
3469 __ pop(reg->as_Register());
3470 } else if (reg->is_XMMRegister()) {
3471 if (UseAVX >= 3) {
3472 __ evmovdqul(reg->as_XMMRegister(), Address(rsp, 0), Assembler::AVX_512bit);
3473 __ addptr(rsp, 64); // bytes
3474 } else if (UseAVX >= 1) {
3475 __ vmovdqu(reg->as_XMMRegister(), Address(rsp, 0));
3476 __ addptr(rsp, 32);
3477 } else {
3478 __ movdqu(reg->as_XMMRegister(), Address(rsp, 0));
3479 __ addptr(rsp, 16);
3480 }
3481 } else {
3482 ShouldNotReachHere();
3483 }
3484 }
3485
3486 private:
3487 #ifdef ASSERT
3488 bool target_uses_register(VMReg reg) {
3489 return _input_registers.contains(reg) || _output_registers.contains(reg);
3490 }
3491 #endif
3492 };
3493
3494 BufferBlob* SharedRuntime::make_native_invoker(address call_target,
3495 int shadow_space_bytes,
3496 const GrowableArray<VMReg>& input_registers,
3497 const GrowableArray<VMReg>& output_registers) {
3498 BufferBlob* _invoke_native_blob = BufferBlob::create("nep_invoker_blob", native_invoker_code_size);
3499 if (_invoke_native_blob == NULL)
3500 return NULL; // allocation failure
3501
3502 CodeBuffer code(_invoke_native_blob);
3503 NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers);
3504 g.generate();
3505 code.log_section_sizes("nep_invoker_blob");
3506
3507 return _invoke_native_blob;
3508 }
3509
3510 void NativeInvokerGenerator::generate() {
3511 assert(!(target_uses_register(r15_thread->as_VMReg()) || target_uses_register(rscratch1->as_VMReg())), "Register conflict");
3512
3513 MacroAssembler* masm = _masm;
3514 __ enter();
3515
3516 Address java_pc(r15_thread, JavaThread::last_Java_pc_offset());
3517 __ movptr(rscratch1, Address(rsp, 8)); // read return address from stack
3518 __ movptr(java_pc, rscratch1);
3519
3520 __ movptr(rscratch1, rsp);
3521 __ addptr(rscratch1, 16); // skip return and frame
3522 __ movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), rscratch1);
3523
3524 __ movptr(Address(r15_thread, JavaThread::saved_rbp_address_offset()), rsp); // rsp points at saved RBP
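  // The last_Java_pc/last_Java_sp values and the saved-rbp address recorded
  // above form the frame anchor that lets the VM walk this thread's stack
  // (e.g. at a safepoint) while the thread is off executing the native call
  // below.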
3525
3526 // State transition
3527 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
3528
3529 if (_shadow_space_bytes != 0) {
3530 // needed here for correct stack args offset on Windows
3531 __ subptr(rsp, _shadow_space_bytes);
3532 }
3533
3534 __ call(RuntimeAddress(_call_target));
3535
3536 if (_shadow_space_bytes != 0) {
3537 // needed here for correct stack args offset on Windows
3538 __ addptr(rsp, _shadow_space_bytes);
3539 }
3540
3541 assert(_output_registers.length() <= 1
3542 || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
3543 bool need_spills = _output_registers.length() != 0;
3544 VMReg ret_reg = need_spills ? _output_registers.at(0) : VMRegImpl::Bad();
3545
3546 __ restore_cpu_control_state_after_jni();
3547
3548 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
3549
3550 // Force this write out before the read below
3551 __ membar(Assembler::Membar_mask_bits(
3552 Assembler::LoadLoad | Assembler::LoadStore |
3553 Assembler::StoreLoad | Assembler::StoreStore));
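  // The StoreLoad barrier keeps the _thread_in_native_trans store from being
  // reordered with the safepoint-poll load below; without it the VM could
  // miss this thread's state change while coordinating a safepoint.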
3554
3555 Label L_after_safepoint_poll;
3556 Label L_safepoint_poll_slow_path;
3557
3558 __ safepoint_poll(L_safepoint_poll_slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
3559 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
3560 __ jcc(Assembler::notEqual, L_safepoint_poll_slow_path);
3561
3562 __ bind(L_after_safepoint_poll);
3563
3564 // change thread state
3565 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
3566
3567 __ block_comment("reguard stack check");
3568 Label L_reguard;
3569 Label L_after_reguard;
3570 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
3571 __ jcc(Assembler::equal, L_reguard);
3572 __ bind(L_after_reguard);
3573
3574 __ reset_last_Java_frame(r15_thread, true);
3575
3576 __ leave(); // required for proper stackwalking of RuntimeStub frame
3577 __ ret(0);
3578
3579 //////////////////////////////////////////////////////////////////////////////
3580
3581 __ block_comment("{ L_safepoint_poll_slow_path");
3582 __ bind(L_safepoint_poll_slow_path);
3583 __ vzeroupper();
3584
3585 if (need_spills) {
3586 spill_register(ret_reg);
3587 }
3588
3589 __ mov(c_rarg0, r15_thread);
3590 __ mov(r12, rsp); // remember sp
3591 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
3592 __ andptr(rsp, -16); // align stack as required by ABI
3593 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
3594 __ mov(rsp, r12); // restore sp
3595 __ reinit_heapbase();
3596
3597 if (need_spills) {
3598 fill_register(ret_reg);
3599 }
3600
3601 __ jmp(L_after_safepoint_poll);
3602 __ block_comment("} L_safepoint_poll_slow_path");
3603
3604 //////////////////////////////////////////////////////////////////////////////
3605
3606 __ block_comment("{ L_reguard");
3607 __ bind(L_reguard);
3608 __ vzeroupper();
3609
3610 if (need_spills) {
3611 spill_register(ret_reg);
3612 }
3613
3614 __ mov(r12, rsp); // remember sp
3615 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
3616 __ andptr(rsp, -16); // align stack as required by ABI
3617 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
3618 __ mov(rsp, r12); // restore sp
3619 __ reinit_heapbase();
3620
3621 if (need_spills) {
3622 fill_register(ret_reg);
3623 }
3624
3625 __ jmp(L_after_reguard);
3626
3627 __ block_comment("} L_reguard");
3628
3629 //////////////////////////////////////////////////////////////////////////////
3630
3631 __ flush();
3632 }
3633
3634 //------------------------------Montgomery multiplication------------------------
3635 //
3636
3637 #ifndef _WINDOWS
3638
3639 // Subtract 0:b from carry:a. Return carry.
3640 static julong
3641 sub(julong a[], julong b[], julong carry, long len) {
3642 long long i = 0, cnt = len;
3643 julong tmp;
3644 asm volatile("clc; "
3645 "0: ; "
3646 "mov (%[b], %[i], 8), %[tmp]; "
3647 "sbb %[tmp], (%[a], %[i], 8); "
3648 "inc %[i]; dec %[cnt]; "
3649 "jne 0b; "
3650 "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3651 : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3652 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3653 : "memory");
3654 return tmp;
3655 }
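// In the asm above: clc clears CF, then each iteration loads b[i] and
// subtracts it with borrow from a[i] in place (sbb); inc/dec are used for
// the loop bookkeeping because they leave CF untouched.  The trailing
// "mov carry; sbb $0" folds the remaining borrow into the incoming carry
// word, which becomes the returned carry.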
3656
3657 // Multiply (unsigned) Long A by Long B, accumulating the double-
3658 // length result into the accumulator formed of T0, T1, and T2.
3659 #define MACC(A, B, T0, T1, T2) \
3660 do { \
3661 unsigned long hi, lo; \
3662 __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
3663 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
3664 : "r"(A), "a"(B) : "cc"); \
3665 } while(0)
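// In effect (T2:T1:T0) += A * B: mul leaves the 128-bit product in rdx:rax,
// which is then added into the triple-precision accumulator with carries.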
3666
3667 // As above, but add twice the double-length result into the
3668 // accumulator.
3669 #define MACC2(A, B, T0, T1, T2) \
3670 do { \
3671 unsigned long hi, lo; \
3672 __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4; " \
3673 "add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
3674 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
3675 : "r"(A), "a"(B) : "cc"); \
3676 } while(0)
3677
3678 #else //_WINDOWS
3679
3680 static julong
3681 sub(julong a[], julong b[], julong carry, long len) {
3682 long i;
3683 julong tmp;
3684 unsigned char c = 1;
3685 for (i = 0; i < len; i++) {
3686 c = _addcarry_u64(c, a[i], ~b[i], &tmp);
3687 a[i] = tmp;
3688 }
3689 c = _addcarry_u64(c, carry, ~0, &tmp);
3690 return tmp;
3691 }
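// Same borrow chain as the GCC asm version above: a - b == a + ~b + 1, so
// seeding the carry chain with c = 1 and adding ~b[i] word by word performs
// the subtraction; the final _addcarry_u64(c, carry, ~0, &tmp) folds the
// leftover borrow into the incoming carry word before it is returned.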
3692
3693 // Multiply (unsigned) Long A by Long B, accumulating the double-
3694 // length result into the accumulator formed of T0, T1, and T2.
3695 #define MACC(A, B, T0, T1, T2) \
3696 do { \
3697 julong hi, lo; \
3698 lo = _umul128(A, B, &hi); \
3699 unsigned char c = _addcarry_u64(0, lo, T0, &T0); \
3700 c = _addcarry_u64(c, hi, T1, &T1); \
3701 _addcarry_u64(c, T2, 0, &T2); \
3702 } while(0)
3703
3704 // As above, but add twice the double-length result into the
3705 // accumulator.
3706 #define MACC2(A, B, T0, T1, T2) \
3707 do { \
3708 julong hi, lo; \
3709 lo = _umul128(A, B, &hi); \
3710 unsigned char c = _addcarry_u64(0, lo, T0, &T0); \
3711 c = _addcarry_u64(c, hi, T1, &T1); \
3712 _addcarry_u64(c, T2, 0, &T2); \
3713 c = _addcarry_u64(0, lo, T0, &T0); \
3714 c = _addcarry_u64(c, hi, T1, &T1); \
3715 _addcarry_u64(c, T2, 0, &T2); \
3716 } while(0)
3717
3718 #endif //_WINDOWS
3719
3720 // Fast Montgomery multiplication. The derivation of the algorithm is
3721 // in A Cryptographic Library for the Motorola DSP56000,
3722 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
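// The key step is m[i] = t0 * inv with inv == -n[0]^-1 mod 2^64: adding
// m[i] * n[0] then makes the low accumulator word t0 a multiple of 2^64,
// i.e. zero (the asserts below check this), so shifting the accumulator
// down one word per iteration performs the division by 2^(64*len) that
// Montgomery reduction requires.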
3723
3724 static void NOINLINE
3725 montgomery_multiply(julong a[], julong b[], julong n[],
3726 julong m[], julong inv, int len) {
3727 julong t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3728 int i;
3729
3730 assert(inv * n[0] == ULLONG_MAX, "broken inverse in Montgomery multiply");
3731
3732 for (i = 0; i < len; i++) {
3733 int j;
3734 for (j = 0; j < i; j++) {
3735 MACC(a[j], b[i-j], t0, t1, t2);
3736 MACC(m[j], n[i-j], t0, t1, t2);
3737 }
3738 MACC(a[i], b[0], t0, t1, t2);
3739 m[i] = t0 * inv;
3740 MACC(m[i], n[0], t0, t1, t2);
3741
3742 assert(t0 == 0, "broken Montgomery multiply");
3743
3744 t0 = t1; t1 = t2; t2 = 0;
3745 }
3746
3747 for (i = len; i < 2*len; i++) {
3748 int j;
3749 for (j = i-len+1; j < len; j++) {
3750 MACC(a[j], b[i-j], t0, t1, t2);
3751 MACC(m[j], n[i-j], t0, t1, t2);
3752 }
3753 m[i-len] = t0;
3754 t0 = t1; t1 = t2; t2 = 0;
3755 }
3756
3757 while (t0)
3758 t0 = sub(m, n, t0, len);
3759 }
3760
3761 // Fast Montgomery squaring. This uses asymptotically 25% fewer
3762 // multiplies so it should be up to 25% faster than Montgomery
3763 // multiplication. However, its loop control is more complex and it
3764 // may actually run slower on some machines.
3765
3766 static void NOINLINE
3767 montgomery_square(julong a[], julong n[],
3768 julong m[], julong inv, int len) {
3769 julong t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3770 int i;
3771
3772 assert(inv * n[0] == ULLONG_MAX, "broken inverse in Montgomery square");
3773
3774 for (i = 0; i < len; i++) {
3775 int j;
3776 int end = (i+1)/2;
3777 for (j = 0; j < end; j++) {
3778 MACC2(a[j], a[i-j], t0, t1, t2);
3779 MACC(m[j], n[i-j], t0, t1, t2);
3780 }
3781 if ((i & 1) == 0) {
3782 MACC(a[j], a[j], t0, t1, t2);
3783 }
3784 for (; j < i; j++) {
3785 MACC(m[j], n[i-j], t0, t1, t2);
3786 }
3787 m[i] = t0 * inv;
3788 MACC(m[i], n[0], t0, t1, t2);
3789
3790 assert(t0 == 0, "broken Montgomery square");
3791
3792 t0 = t1; t1 = t2; t2 = 0;
3793 }
3794
3795 for (i = len; i < 2*len; i++) {
3796 int start = i-len+1;
3797 int end = start + (len - start)/2;
3798 int j;
3799 for (j = start; j < end; j++) {
3800 MACC2(a[j], a[i-j], t0, t1, t2);
3801 MACC(m[j], n[i-j], t0, t1, t2);
3802 }
3803 if ((i & 1) == 0) {
3804 MACC(a[j], a[j], t0, t1, t2);
3805 }
3806 for (; j < len; j++) {
3807 MACC(m[j], n[i-j], t0, t1, t2);
3808 }
3809 m[i-len] = t0;
3810 t0 = t1; t1 = t2; t2 = 0;
3811 }
3812
3813 while (t0)
3814 t0 = sub(m, n, t0, len);
3815 }
3816
3817 // Swap words in a longword.
3818 static julong swap(julong x) {
3819 return (x << 32) | (x >> 32);
3820 }
3821
3822 // Copy len longwords from s to d, word-swapping as we go. The
3823 // destination array is reversed.
3824 static void reverse_words(julong *s, julong *d, int len) {
3825 d += len;
3826 while(len-- > 0) {
3827 d--;
3828 *d = swap(*s);
3829 s++;
3830 }
3831 }
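// The jint arrays passed in from BigInteger hold 32-bit words with the most
// significant word first.  reverse_words() (together with swap()) converts
// such an array into 64-bit limbs with the least significant limb first, as
// the Montgomery routines above expect; e.g. the four ints {A, B, C, D}
// (A most significant) become the two julongs {(C << 32) | D, (A << 32) | B}.
// Because the transform is its own inverse, the same call converts the
// result back into ints at the end.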
3832
3833 // The threshold at which squaring is advantageous was determined
3834 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
3835 #define MONTGOMERY_SQUARING_THRESHOLD 64
3836
3837 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
3838 jint len, jlong inv,
3839 jint *m_ints) {
3840 assert(len % 2 == 0, "array length in montgomery_multiply must be even");
3841 int longwords = len/2;
3842
3843 // Make very sure we don't use so much space that the stack might
3844 // overflow. 512 jints corresponds to a 16384-bit integer and
3845 // will use a total of 8k bytes (4 arrays of 256 julongs) of stack space here.
3846 int total_allocation = longwords * sizeof (julong) * 4;
3847 guarantee(total_allocation <= 8192, "must be");
3848 julong *scratch = (julong *)alloca(total_allocation);
3849
3850 // Local scratch arrays
3851 julong
3852 *a = scratch + 0 * longwords,
3853 *b = scratch + 1 * longwords,
3854 *n = scratch + 2 * longwords,
3855 *m = scratch + 3 * longwords;
3856
3857 reverse_words((julong *)a_ints, a, longwords);
3858 reverse_words((julong *)b_ints, b, longwords);
3859 reverse_words((julong *)n_ints, n, longwords);
3860
3861 ::montgomery_multiply(a, b, n, m, (julong)inv, longwords);
3862
3863 reverse_words(m, (julong *)m_ints, longwords);
3864 }
3865
3866 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
3867 jint len, jlong inv,
3868 jint *m_ints) {
3869 assert(len % 2 == 0, "array length in montgomery_square must be even");
3870 int longwords = len/2;
3871
3872 // Make very sure we don't use so much space that the stack might
3873 // overflow. 512 jints corresponds to a 16384-bit integer and
3874 // will use a total of 6k bytes (3 arrays of 256 julongs) of stack space here.
3875 int total_allocation = longwords * sizeof (julong) * 3;
3876 guarantee(total_allocation <= 8192, "must be");
3877 julong *scratch = (julong *)alloca(total_allocation);
3878
3879 // Local scratch arrays
3880 julong
3881 *a = scratch + 0 * longwords,
3882 *n = scratch + 1 * longwords,
3883 *m = scratch + 2 * longwords;
3884
3885 reverse_words((julong *)a_ints, a, longwords);
3886 reverse_words((julong *)n_ints, n, longwords);
3887
3888 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3889 ::montgomery_square(a, n, m, (julong)inv, longwords);
3890 } else {
3891 ::montgomery_multiply(a, a, n, m, (julong)inv, longwords);
3892 }
3893
3894 reverse_words(m, (julong *)m_ints, longwords);
3895 }
3896
3897 #ifdef COMPILER2
3898 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
3899 //
3900 //------------------------------generate_exception_blob---------------------------
3901 // Creates the exception blob at the end.
3902 // Compiled code jumps to this blob when an exception is thrown
3903 // (see emit_exception_handler in the x86_64.ad file).
3904 //
3905 // Given an exception pc at a call we call into the runtime for the
3906 // handler in this method. This handler might merely restore state
3907 // (i.e. callee save registers), unwind the frame, and jump to the
3908 // exception handler for the nmethod if there is no Java-level handler
3909 // for the nmethod.
3910 //
3911 // This code is entered with a jmp.
3912 //
3913 // Arguments:
3914 // rax: exception oop
3915 // rdx: exception pc
3916 //
3917 // Results:
3918 // rax: exception oop
3919 // rdx: exception pc in caller or ???
3920 // destination: exception handler of caller
3921 //
3922 // Note: the exception pc MUST be at a call (precise debug information)
3923 // Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
3924 //
3925
3926 void OptoRuntime::generate_exception_blob() {
3927 assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
3928 assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
3929 assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
3930
3931 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3932
3933 // Allocate space for the code
3934 ResourceMark rm;
3935 // Setup code generation tools
3936 CodeBuffer buffer("exception_blob", 2048, 1024);
3937 MacroAssembler* masm = new MacroAssembler(&buffer);
3938
3939
3940 address start = __ pc();
3941
3942 // Exception pc is 'return address' for stack walker
3943 __ push(rdx);
3944 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
3945
3946 // Save callee-saved registers. See x86_64.ad.
3947
3948 // rbp is an implicitly saved callee saved register (i.e., the calling
3949 // convention will save/restore it in the prolog/epilog). Other than that
3950 // there are no callee save registers now that adapter frames are gone.
3951
3952 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3953
3954 // Store exception in Thread object. We cannot pass any arguments to the
3955 // handle_exception call, since we do not want to make any assumption
3956 // about the size of the frame where the exception happened in.
3957 // c_rarg0 is either rdi (Linux) or rcx (Windows).
3958 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
3959 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3960
3961 // This call does all the hard work. It checks if an exception handler
3962 // exists in the method.
3963 // If so, it returns the handler address.
3964 // If not, it prepares for stack-unwinding, restoring the callee-save
3965 // registers of the frame being removed.
3966 //
3967 // address OptoRuntime::handle_exception_C(JavaThread* thread)
3968
3969 // At a method handle call, the stack may not be properly aligned
3970 // when returning with an exception.
3971 address the_pc = __ pc();
3972 __ set_last_Java_frame(noreg, noreg, the_pc);
3973 __ mov(c_rarg0, r15_thread);
3974 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
3975 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3976
3977 // Set an oopmap for the call site. This oopmap will only be used if we
3978 // are unwinding the stack. Hence, all locations will be dead.
3979 // Callee-saved registers will be the same as the frame above (i.e.,
3980 // handle_exception_stub), since they were restored when we got the
3981 // exception.
3982
3983 OopMapSet* oop_maps = new OopMapSet();
3984
3985 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3986
3987 __ reset_last_Java_frame(false);
3988
3989 // Restore callee-saved registers
3990
3991 // rbp is an implicitly saved callee-saved register (i.e., the calling
3992 // convention will save/restore it in the prolog/epilog). Other than that
3993 // there are no callee save registers now that adapter frames are gone.
3994
3995 __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
3996
3997 __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
3998 __ pop(rdx); // No need for exception pc anymore
3999
4000 // rax: exception handler
4001
4002 // We have a handler in rax (could be deopt blob).
4003 __ mov(r8, rax);
4004
4005 // Get the exception oop
4006 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4007 // Get the exception pc in case we are deoptimized
4008 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4009 #ifdef ASSERT
4010 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4011 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4012 #endif
4013 // Clear the exception oop so GC no longer processes it as a root.
4014 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4015
4016 // rax: exception oop
4017 // r8: exception handler
4018 // rdx: exception pc
4019 // Jump to handler
4020
4021 __ jmp(r8);
4022
4023 // Make sure all code is generated
4024 masm->flush();
4025
4026 // Set exception blob
4027 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4028 }
4029 #endif // COMPILER2
4030