/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
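// (With the usual AArch64 values, StackAlignmentInBytes == 16 and
// VMRegImpl::stack_slot_size == 4, this works out to 4 slots per
// 16-byte alignment unit.)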

class SimpleRuntimeFrame {

 public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    // we don't expect any arg reg save area so aarch64 asserts that
    // frame::arg_reg_save_area_bytes == 0
    rbp_off = 0,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};

// FIXME -- this is used by C1
class RegisterSaver {
  const bool _save_vectors;
 public:
  RegisterSaver(bool save_vectors) : _save_vectors(save_vectors) {}

  OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  int reg_offset_in_bytes(Register r);
  int r0_offset_in_bytes()        { return reg_offset_in_bytes(r0); }
  int rscratch1_offset_in_bytes() { return reg_offset_in_bytes(rscratch1); }
  int v0_offset_in_bytes(void)    { return 0; }

  // Capture info about frame layout
  // Note this is only correct when not saving full vectors.
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
                // The frame sender code expects that rfp will be in
                // the "natural" place and will override any oopMap
                // setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                r0_off = fpu_state_off + FPUStateSizeInWords,
                rfp_off = r0_off + (RegisterImpl::number_of_registers - 2) * RegisterImpl::max_slots_per_register,
                return_off = rfp_off + RegisterImpl::max_slots_per_register, // slot for return address
                reg_save_size = return_off + RegisterImpl::max_slots_per_register};

};

int RegisterSaver::reg_offset_in_bytes(Register r) {
  // The integer registers are located above the floating point
  // registers in the stack frame pushed by save_live_registers() so the
  // offset depends on whether we are saving full vectors, and whether
  // those vectors are NEON or SVE.

  int slots_per_vect = FloatRegisterImpl::save_slots_per_register;

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    slots_per_vect = FloatRegisterImpl::slots_per_neon_register;

#ifdef COMPILER2
    if (Matcher::supports_scalable_vector()) {
      slots_per_vect = Matcher::scalable_vector_reg_size(T_FLOAT);
    }
#endif
  }
#endif

  int r0_offset = (slots_per_vect * FloatRegisterImpl::number_of_registers) * BytesPerInt;
  return r0_offset + r->encoding() * wordSize;
}
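
// A worked example (a sketch, assuming the default layout where vectors are
// not saved, FloatRegisterImpl::save_slots_per_register == 2 and there are
// 32 float registers): the float save area then occupies 2 * 32 * 4 = 256
// bytes, so r0 lives at byte offset 256 and, say, r5 at 256 + 5 * 8 = 296.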

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  bool use_sve = false;
  int sve_vector_size_in_bytes = 0;
  int sve_vector_size_in_slots = 0;

#ifdef COMPILER2
  use_sve = Matcher::supports_scalable_vector();
  sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
  sve_vector_size_in_slots = Matcher::scalable_vector_reg_size(T_FLOAT);
#endif

#if COMPILER2_OR_JVMCI
  if (_save_vectors) {
    int vect_words = 0;
    int extra_save_slots_per_register = 0;
    // Save upper half of vector registers
    if (use_sve) {
      extra_save_slots_per_register = sve_vector_size_in_slots - FloatRegisterImpl::save_slots_per_register;
    } else {
      extra_save_slots_per_register = FloatRegisterImpl::extra_save_slots_per_neon_register;
    }
    vect_words = FloatRegisterImpl::number_of_registers * extra_save_slots_per_register /
                 VMRegImpl::slots_per_word;
    additional_frame_words += vect_words;
  }
#else
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
                                     reg_save_size * BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer and Float registers.
  __ enter();
  __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
    Register r = as_Register(i);
    if (r <= rfp && r != rscratch1 && r != rscratch2) {
      // SP offsets are in 4-byte words.
      // Register slots are 8 bytes wide, 32 floating-point registers.
      int sp_offset = RegisterImpl::max_slots_per_register * i +
                      FloatRegisterImpl::save_slots_per_register * FloatRegisterImpl::number_of_registers;
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots),
                                r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = 0;
    if (_save_vectors) {
      sp_offset = use_sve ? (sve_vector_size_in_slots * i) :
                            (FloatRegisterImpl::slots_per_neon_register * i);
    } else {
      sp_offset = FloatRegisterImpl::save_slots_per_register * i;
    }
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                              r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
                   Matcher::scalable_vector_reg_size(T_BYTE));
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ leave();
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte vector registers are saved by default on AArch64.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
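
// For example (a sketch): an incoming stack argument in the caller's first
// slot has reg2stack() == 0, so reg2offset_in() yields (0 + 4) * 4 = 16 and
// the value is addressed at [rfp, #16], just above the saved rfp/lr pair.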

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher.  Register values up to
// RegisterImpl::number_of_registers are the 64-bit integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.
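//
// For example (a sketch, not generated code): a Java signature of
// (int, long, Object, double), i.e. sig_bt = { T_INT, T_LONG, T_VOID,
// T_OBJECT, T_DOUBLE, T_VOID }, maps to
//   T_INT    -> j_rarg0 (set1)
//   T_LONG   -> j_rarg1 (set2; its T_VOID half is set_bad)
//   T_OBJECT -> j_rarg2 (set2)
//   T_DOUBLE -> j_farg0 (set2; its T_VOID half is set_bad)
// and the return value is 0 because no stack slots were needed.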

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return align_up(stk_args, 2);
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blr(rscratch1);

  // Explicit isb required because fixup_callers_callsite may change the code
  // stream.
  __ safepoint_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mov(r13, sp);

  // stack is aligned, keep it that way
  extraspace = align_up(extraspace, 2*wordSize);

  if (extraspace)
    __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a Java long/double
    // in a single slot on a 64-bit VM, and it would be silly to break them up, the
    // interpreter leaves one slot empty and only stores to a single slot.  In this
    // case the slot that is occupied is the T_VOID slot.  See, I said it was confusing.
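    //
    // (Concretely, using the table above: the T_LONG at i == 0 is stored at
    // next_off == 24, the T_VOID slot, while its own st_off == 32 slot is
    // only filled with known junk in debug builds.)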

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rscratch1
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        // sign extend??
        __ ldrw(rscratch1, Address(sp, ld_off));
        __ str(rscratch1, Address(sp, st_off));

      } else {

        __ ldr(rscratch1, Address(sp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        } else {
          __ str(rscratch1, Address(sp, st_off));
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to slot
        // why not sign extend??
        __ str(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // jlong/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
          __ str(r, Address(sp, next_off));
        } else {
          __ str(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
        __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}


void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.

  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    // assert(Interpreter::contains($return_addr) ||
    //        StubRoutines::contains($return_addr),
    //        "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
#endif
  }

  // Cut-out for having no stack args.
  int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
  if (comp_args_on_stack) {
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address )
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race through here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

  __ br(rscratch1);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Label ok;

  Register holder = rscratch2;
  Register receiver = j_rarg0;
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    __ load_klass(rscratch1, receiver);
    __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ cmp(rscratch1, tmp);
    __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
    __ br(Assembler::EQ, ok);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = NULL;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}

static int c_calling_convention_priv(const BasicType *sig_bt,
                                     VMRegPair *regs,
                                     VMRegPair *regs2,
                                     int total_args_passed) {
  assert(regs2 == NULL, "not needed on AArch64");

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
#ifdef __APPLE__
        // Less-than word types are stored one after another.
        // The code is unable to handle this, so bail out.
        return -1;
#endif
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
#ifdef __APPLE__
        // Less-than word types are stored one after another.
        // The code is unable to handle this, so bail out.
        return -1;
#endif
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}

int SharedRuntime::vector_calling_convention(VMRegPair *regs,
                                             uint num_bits,
                                             uint total_args_passed) {
  Unimplemented();
  return 0;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed)
{
  int result = c_calling_convention_priv(sig_bt, regs, regs2, total_args_passed);
  guarantee(result >= 0, "Unsupported arguments configuration");
  return result;
}

// On 64-bit we store integer-like items to the stack as 64-bit items
// (AArch64 ABI) even though Java would only store 32 bits for a parameter.
// On 32-bit it would simply be 32 bits, so this routine does 32->32 on
// 32-bit and 32->64 on 64-bit.
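// (On AArch64 the interesting case below is stack -> register, which uses
// ldrsw and therefore sign-extends the 32-bit value to 64 bits; the
// register -> register case uses sxtw for the same reason.)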
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is NULL; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    __ lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmp(rscratch1, zr);
    __ csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmp(rOop, zr);
    __ lea(rHandle, Address(sp, offset));
    // conditionally move a NULL
    __ csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}

// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(src.first()->is_stack() && dst.first()->is_stack() ||
         src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ ldrw(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ strw(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      ShouldNotReachHere();
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      __ fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      ShouldNotReachHere();
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}


// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(src.first()->is_stack() && dst.first()->is_stack() ||
         src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      ShouldNotReachHere();
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      __ fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      ShouldNotReachHere();
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:
    break;
  default:
    __ str(r0, Address(rfp, -wordSize));
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:
    break;
  default:
    __ ldr(r0, Address(rfp, -wordSize));
    break;
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop(x, sp);
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}
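
// Note: restore_args mirrors save_args: the integer register set is popped
// first (it was pushed last), and the float registers are then reloaded in
// the reverse of the order they were spilled, so the stack stays strictly
// LIFO across a save_args/restore_args pair.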

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }


class ComputeMoveOrder: public StackObj {
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMRegPair _src;
    VMRegPair _dst;
    int _src_index;
    int _dst_index;
    bool _processed;
    MoveOperation* _next;
    MoveOperation* _prev;

    static int get_id(VMRegPair r) { Unimplemented(); return 0; }

   public:
    MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
      _src(src)
    , _dst(dst)
    , _src_index(src_index)
    , _dst_index(dst_index)
    , _processed(false)
    , _next(NULL)
    , _prev(NULL) { Unimplemented(); }

    VMRegPair src() const              { Unimplemented(); return _src; }
    int src_id() const                 { Unimplemented(); return 0; }
    int src_index() const              { Unimplemented(); return 0; }
    VMRegPair dst() const              { Unimplemented(); return _src; }
    void set_dst(int i, VMRegPair dst) { Unimplemented(); }
    int dst_index() const              { Unimplemented(); return 0; }
    int dst_id() const                 { Unimplemented(); return 0; }
    MoveOperation* next() const        { Unimplemented(); return 0; }
    MoveOperation* prev() const        { Unimplemented(); return 0; }
    void set_processed()               { Unimplemented(); }
    bool is_processed() const          { Unimplemented(); return 0; }

    // insert
    void break_cycle(VMRegPair temp_register) { Unimplemented(); }

    void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
  };

 private:
  GrowableArray<MoveOperation*> edges;

 public:
  ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
                   BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }

  // Collected all the move operations
  void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }

  // Walk the edges breaking cycles between moves.  The result list
  // can be walked in order to produce the proper set of loads
  GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
};


static void rt_call(MacroAssembler* masm, address dest) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    __ lea(rscratch1, RuntimeAddress(dest));
    __ blr(rscratch1);
  }
}
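
// Usage note: rt_call is used for calls whose target may or may not live in
// the code cache. When it does, far_call can emit a properly relocated
// call; otherwise (e.g. a C runtime entry) the absolute address is
// materialized in rscratch1 and called indirectly.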

static void verify_oop_args(MacroAssembler* masm,
                            const methodHandle& method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = r2;  // known to be free at this point
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee.  Critical native functions leave the state _in_Java,
// since they block out GC.
// Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions, since it is impossible for them
// to be thrown.
//

nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type,
                                                address critical_entry) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;

    // First instruction must be a nop as it may need to be patched on deoptimisation
    __ nop();
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = critical_entry;
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args++;
      }
    }
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        ss.skip_array_prefix(1);  // skip one '['
        assert(ss.is_primitive(), "primitive type expected");
        in_elem_bt[i] = ss.type();
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type() ||
               in_sig_bt[i] == T_ARRAY, "must match");
        ss.next();
      }
    }
  }

  // Now figure out where the args must be stored and how much stack space
  // they require.
  int out_arg_slots;
  out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, NULL, total_c_args);

  if (out_arg_slots < 0) {
    return NULL;
  }

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // incoming registers

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area
  int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for ( int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_SHORT:
        case T_CHAR:
        case T_INT:  single_slots++; break;
        case T_ARRAY:  // specific to LP64 (7145024)
        case T_LONG: double_slots++; break;
        default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
    // align the save area
    if (double_slots != 0) {
      stack_slots = align_up(stack_slots, 2);
    }
  }

  int oop_handle_offset = stack_slots;
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place (+2 slots) to save return values or temps during shuffling,
  // plus 4 slots for the return address (which we own) and the saved rfp
  stack_slots += 6;
1461
1462 // Ok The space we have allocated will look like:
1463 //
1464 //
1465 // FP-> | |
1466 // |---------------------|
1467 // | 2 slots for moves |
1468 // |---------------------|
1469 // | lock box (if sync) |
1470 // |---------------------| <- lock_slot_offset
1471 // | klass (if static) |
1472 // |---------------------| <- klass_slot_offset
1473 // | oopHandle area |
1474 // |---------------------| <- oop_handle_offset (8 java arg registers)
1475 // | outbound memory |
1476 // | based arguments |
1477 // | |
1478 // |---------------------|
1479 // | |
1480 // SP-> | out_preserved_slots |
1481 //
1482 //
1483
1484
1485 // Now compute actual number of stack words we need rounding to make
1486 // stack properly aligned.
1487 stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1488
1489 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
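// For example (a sketch, assuming StackAlignmentInBytes == 16 and 4-byte
// stack slots, so StackAlignmentInSlots == 4): a raw total of 29 slots is
// rounded up to 32 slots, giving stack_size == 128 bytes.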
1490
1491 // First thing: make an inline cache (IC) check to see if we should even be here
1492
1493 // We are free to use all registers as temps without saving them and
1494 // restoring them except rfp. rfp is the only callee save register
1495 // as far as the interpreter and the compiler(s) are concerned.
1496
1497
1498 const Register ic_reg = rscratch2;
1499 const Register receiver = j_rarg0;
1500
1501 Label hit;
1502 Label exception_pending;
1503
1504 assert_different_registers(ic_reg, receiver, rscratch1);
1505 __ verify_oop(receiver);
1506 __ cmp_klass(receiver, ic_reg, rscratch1);
1507 __ br(Assembler::EQ, hit);
1508
1509 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
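// The check above, as a rough C-like sketch (not generated code):
//   if (receiver->klass() != ic_klass /* expected klass, in rscratch2 */)
//     goto ic_miss_stub;   // re-resolve the call site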
1510
1511 // Verified entry point must be aligned
1512 __ align(8);
1513
1514 __ bind(hit);
1515
1516 int vep_offset = ((intptr_t)__ pc()) - start;
1517
1518 // If we have to make this method not-entrant we'll overwrite its
1519 // first instruction with a jump. For this action to be legal we
1520 // must ensure that this first instruction is a B, BL, NOP, BKPT,
1521 // SVC, HVC, or SMC. Make it a NOP.
1522 __ nop();
1523
1524 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
1525 Label L_skip_barrier;
1526 __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
1527 __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1528 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1529
1530 __ bind(L_skip_barrier);
1531 }
1532
1533 // Generate stack overflow check
1534 __ bang_stack_with_offset(checked_cast<int>(StackOverflow::stack_shadow_zone_size()));
1535
1536 // Generate a new frame for the wrapper.
1537 __ enter();
1538 // -2 because return address is already present and so is saved rfp
1539 __ sub(sp, sp, stack_size - 2*wordSize);
1540
1541 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1542 bs->nmethod_entry_barrier(masm);
1543
1544 // Frame is now completed as far as size and linkage.
1545 int frame_complete = ((intptr_t)__ pc()) - start;
1546
1547 // We use r20 as the oop handle for the receiver/klass
1548 // It is callee save so it survives the call to native
1549
1550 const Register oop_handle_reg = r20;
1551
1552 //
1553 // We immediately shuffle the arguments so that for any vm call we have
1554 // to make from here on out (sync slow path, jvmti, etc.) we will have
1555 // captured the oops from our caller and have a valid oopMap for
1556 // them.
1557
1558 // -----------------
1559 // The Grand Shuffle
1560
1561 // The Java calling convention is either equal (linux) or denser (win64) than the
1562 // c calling convention. However, because of the jni_env argument the c calling
1563 // convention always has at least one more (and two for static) arguments than Java.
1564 // Therefore if we move the args from java -> c backwards then we will never have
1565 // a register->register conflict and we don't have to build a dependency graph
1566 // and figure out how to break any cycles.
1567 //
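// A rough sketch of why backwards works (hypothetical slots A1..A3 for the
// Java args and C2..C4 for the C args, C1 being reserved for JNIEnv*):
//   C4 <- A3;  C3 <- A2;  C2 <- A1
// Every destination has a higher slot number than its source, so moving from
// the last arg to the first never overwrites a value that is still waiting
// to be read.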
1568
1569 // Record esp-based slot for receiver on stack for non-static methods
1570 int receiver_offset = -1;
1571
1572 // This is a trick. We double the stack slots so we can claim
1573 // the oops in the caller's frame. Since we are sure to have
1574 // more args than the caller, doubling is enough to make
1575 // sure we can capture all the incoming oop args from the
1576 // caller.
1577 //
1578 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1579
1580 // Mark location of rfp (someday)
1581 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
1582
1583
1584 int float_args = 0;
1585 int int_args = 0;
1586
1587 #ifdef ASSERT
1588 bool reg_destroyed[RegisterImpl::number_of_registers];
1589 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
1590 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
1591 reg_destroyed[r] = false;
1592 }
1593 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
1594 freg_destroyed[f] = false;
1595 }
1596
1597 #endif /* ASSERT */
1598
1599 // This may iterate in two different directions depending on the
1600 // kind of native it is. The reason is that for regular JNI natives
1601 // the incoming and outgoing registers are offset upwards and for
1602 // critical natives they are offset down.
1603 GrowableArray<int> arg_order(2 * total_in_args);
1604 VMRegPair tmp_vmreg;
1605 tmp_vmreg.set2(r19->as_VMReg());
1606
1607 if (!is_critical_native) {
1608 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1609 arg_order.push(i);
1610 arg_order.push(c_arg);
1611 }
1612 } else {
1613 // Compute a valid move order, using tmp_vmreg to break any cycles
1614 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
1615 }
1616
1617 int temploc = -1;
1618 for (int ai = 0; ai < arg_order.length(); ai += 2) {
1619 int i = arg_order.at(ai);
1620 int c_arg = arg_order.at(ai + 1);
1621 __ block_comment(err_msg("move %d -> %d", i, c_arg));
1622 if (c_arg == -1) {
1623 assert(is_critical_native, "should only be required for critical natives");
1624 // This arg needs to be moved to a temporary
1625 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
1626 in_regs[i] = tmp_vmreg;
1627 temploc = i;
1628 continue;
1629 } else if (i == -1) {
1630 assert(is_critical_native, "should only be required for critical natives");
1631 // Read from the temporary location
1632 assert(temploc != -1, "must be valid");
1633 i = temploc;
1634 temploc = -1;
1635 }
1636 #ifdef ASSERT
1637 if (in_regs[i].first()->is_Register()) {
1638 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1639 } else if (in_regs[i].first()->is_FloatRegister()) {
1640 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1641 }
1642 if (out_regs[c_arg].first()->is_Register()) {
1643 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1644 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1645 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1646 }
1647 #endif /* ASSERT */
1648 switch (in_sig_bt[i]) {
1649 case T_ARRAY:
1650 if (is_critical_native) {
1651 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1652 c_arg++;
1653 #ifdef ASSERT
1654 if (out_regs[c_arg].first()->is_Register()) {
1655 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1656 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1657 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1658 }
1659 #endif
1660 int_args++;
1661 break;
1662 }
1663 case T_OBJECT:
1664 assert(!is_critical_native, "no oop arguments");
1665 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1666 ((i == 0) && (!is_static)),
1667 &receiver_offset);
1668 int_args++;
1669 break;
1670 case T_VOID:
1671 break;
1672
1673 case T_FLOAT:
1674 float_move(masm, in_regs[i], out_regs[c_arg]);
1675 float_args++;
1676 break;
1677
1678 case T_DOUBLE:
1679 assert( i + 1 < total_in_args &&
1680 in_sig_bt[i + 1] == T_VOID &&
1681 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1682 double_move(masm, in_regs[i], out_regs[c_arg]);
1683 float_args++;
1684 break;
1685
1686 case T_LONG :
1687 long_move(masm, in_regs[i], out_regs[c_arg]);
1688 int_args++;
1689 break;
1690
1691 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1692
1693 default:
1694 move32_64(masm, in_regs[i], out_regs[c_arg]);
1695 int_args++;
1696 }
1697 }
1698
1699 // point c_arg at the first arg that is already loaded in case we
1700 // need to spill before we call out
1701 int c_arg = total_c_args - total_in_args;
1702
1703 // Pre-load a static method's oop into c_rarg1.
1704 if (method->is_static() && !is_critical_native) {
1705
1706 // load oop into a register
1707 __ movoop(c_rarg1,
1708 JNIHandles::make_local(method->method_holder()->java_mirror()),
1709 /*immediate*/true);
1710
1711 // Now handlize the static class mirror; it's known not-null.
1712 __ str(c_rarg1, Address(sp, klass_offset));
1713 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1714
1715 // Now get the handle
1716 __ lea(c_rarg1, Address(sp, klass_offset));
1717 // and protect the arg if we must spill
1718 c_arg--;
1719 }
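// Handlization, roughly (a sketch with hypothetical casts): instead of
// passing the mirror oop itself we store it in a stack slot that the oop map
// covers, and pass the address of that slot:
//   *(oop*)(sp + klass_offset) = mirror;        // GC can find/update it here
//   c_rarg1 = (jobject)(sp + klass_offset);     // JNI sees an opaque handle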
1720
1721 // Change state to native (we save the return address in the thread, since it might not
1722 // be pushed on the stack when we do a stack traversal).
1723 // We use the same pc/oopMap repeatedly when we call out
1724
1725 Label native_return;
1726 __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
1727
1728 Label dtrace_method_entry, dtrace_method_entry_done;
1729 {
1730 uint64_t offset;
1731 __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1732 __ ldrb(rscratch1, Address(rscratch1, offset));
1733 __ cbnzw(rscratch1, dtrace_method_entry);
1734 __ bind(dtrace_method_entry_done);
1735 }
1736
1737 // RedefineClasses() tracing support for obsolete method entry
1738 if (log_is_enabled(Trace, redefine, class, obsolete)) {
1739 // protect the args we've loaded
1740 save_args(masm, total_c_args, c_arg, out_regs);
1741 __ mov_metadata(c_rarg1, method());
1742 __ call_VM_leaf(
1743 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1744 rthread, c_rarg1);
1745 restore_args(masm, total_c_args, c_arg, out_regs);
1746 }
1747
1748 // Lock a synchronized method
1749
1750 // Register definitions used by locking and unlocking
1751
1752 const Register swap_reg = r0;
1753 const Register obj_reg = r19; // Will contain the oop
1754 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
1755 const Register old_hdr = r13; // value of old header at unlock time
1756 const Register tmp = lr;
1757
1758 Label slow_path_lock;
1759 Label lock_done;
1760
1761 if (method->is_synchronized()) {
1762 assert(!is_critical_native, "unhandled");
1763
1764 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1765
1766 // Get the handle (the 2nd argument)
1767 __ mov(oop_handle_reg, c_rarg1);
1768
1769 // Get address of the box
1770
1771 __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1772
1773 // Load the oop from the handle
1774 __ ldr(obj_reg, Address(oop_handle_reg, 0));
1775
1776 if (UseBiasedLocking) {
1777 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1778 }
1779
1780 // Load (object->mark() | 1) into swap_reg %r0
1781 __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1782 __ orr(swap_reg, rscratch1, 1);
1783
1784 // Save (object->mark() | 1) into BasicLock's displaced header
1785 __ str(swap_reg, Address(lock_reg, mark_word_offset));
1786
1787 // src -> dest iff dest == r0 else r0 <- dest
1788 { Label here;
1789 __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1790 }
1791
1792 // Hmm should this move to the slow path code area???
1793
1794 // Test if the oopMark is an obvious stack pointer, i.e.,
1795 // 1) (mark & 3) == 0, and
1796 // 2) sp <= mark < mark + os::pagesize()
1797 // These 3 tests can be done by evaluating the following
1798 // expression: ((mark - sp) & (3 - os::vm_page_size())),
1799 // assuming both stack pointer and pagesize have their
1800 // least significant 2 bits clear.
1801 // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1802
1803 __ sub(swap_reg, sp, swap_reg);
1804 __ neg(swap_reg, swap_reg);
1805 __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1806
1807 // Save the test result, for recursive case, the result is zero
1808 __ str(swap_reg, Address(lock_reg, mark_word_offset));
1809 __ br(Assembler::NE, slow_path_lock);
1810
1811 // Slow path will re-enter here
1812
1813 __ bind(lock_done);
1814 }
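// The fast-path locking above, as a rough C-like sketch (assuming the
// stack-locking scheme; page_size stands in for os::vm_page_size()):
//   markWord mark = obj->mark() | 1;                 // set the unlocked bit
//   box->displaced_header = mark;
//   if (CAS(&obj->mark, mark, box)) goto lock_done;  // we now own the lock
//   // CAS failed: check whether the mark already points into our own stack
//   intptr_t rec = (obj->mark() - sp) & (3 - page_size);
//   box->displaced_header = rec;                     // zero marks a recursive lock
//   if (rec != 0) goto slow_path_lock;
//   // fall through to lock_done (recursive case)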
1815
1816
1817 // Finally just about ready to make the JNI call
1818
1819 // get JNIEnv* which is first argument to native
1820 if (!is_critical_native) {
1821 __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1822
1823 // Now set thread in native
1824 __ mov(rscratch1, _thread_in_native);
1825 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1826 __ stlrw(rscratch1, rscratch2);
1827 }
1828
1829 rt_call(masm, native_func);
1830
1831 __ bind(native_return);
1832
1833 intptr_t return_pc = (intptr_t) __ pc();
1834 oop_maps->add_gc_map(return_pc - start, map);
1835
1836 // Unpack native results.
1837 switch (ret_type) {
1838 case T_BOOLEAN: __ c2bool(r0); break;
1839 case T_CHAR : __ ubfx(r0, r0, 0, 16); break;
1840 case T_BYTE : __ sbfx(r0, r0, 0, 8); break;
1841 case T_SHORT : __ sbfx(r0, r0, 0, 16); break;
1842 case T_INT : __ sbfx(r0, r0, 0, 32); break;
1843 case T_DOUBLE :
1844 case T_FLOAT :
1845 // Result is in v0 we'll save as needed
1846 break;
1847 case T_ARRAY: // Really a handle
1848 case T_OBJECT: // Really a handle
1849 break; // can't de-handlize until after safepoint check
1850 case T_VOID: break;
1851 case T_LONG: break;
1852 default : ShouldNotReachHere();
1853 }
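// For example, a native returning jbyte is normalized by the sbfx above,
// roughly r0 = (int64_t)(int8_t)r0 (a sign extension of the low 8 bits),
// while T_CHAR uses ubfx, i.e. a zero extension of the low 16 bits.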
1854
1855 Label safepoint_in_progress, safepoint_in_progress_done;
1856 Label after_transition;
1857
1858 // If this is a critical native, check for a safepoint or suspend request after the call.
1859 // If a safepoint is needed, transition to native, then to native_trans to handle
1860 // safepoints like the native methods that are not critical natives.
1861 if (is_critical_native) {
1862 Label needs_safepoint;
1863 __ safepoint_poll(needs_safepoint, false /* at_return */, true /* acquire */, false /* in_nmethod */);
1864 __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1865 __ cbnzw(rscratch1, needs_safepoint);
1866 __ b(after_transition);
1867 __ bind(needs_safepoint);
1868 }
1869
1870 // Switch thread to "native transition" state before reading the synchronization state.
1871 // This additional state is necessary because reading and testing the synchronization
1872 // state is not atomic w.r.t. GC, as this scenario demonstrates:
1873 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1874 // VM thread changes sync state to synchronizing and suspends threads for GC.
1875 // Thread A is resumed to finish this native method, but doesn't block here since it
1876 // didn't see any synchronization in progress, and escapes.
1877 __ mov(rscratch1, _thread_in_native_trans);
1878
1879 __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1880
1881 // Force this write out before the read below
1882 __ dmb(Assembler::ISH);
1883
1884 __ verify_sve_vector_length();
1885
1886 // Check for safepoint operation in progress and/or pending suspend requests.
1887 {
1888 // We need an acquire here to ensure that any subsequent load of the
1889 // global SafepointSynchronize::_state flag is ordered after this load
1890 // of the thread-local polling word. We don't want this poll to
1891 // return false (i.e. not safepointing) and a later poll of the global
1892 // SafepointSynchronize::_state spuriously to return true.
1893 //
1894 // This is to avoid a race when we're in a native->Java transition
1895 // racing the code which wakes up from a safepoint.
1896
1897 __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
1898 __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1899 __ cbnzw(rscratch1, safepoint_in_progress);
1900 __ bind(safepoint_in_progress_done);
1901 }
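// The transition protocol, roughly (a sketch; the helper names are
// stand-ins, not the real API):
//   thread->state = _thread_in_native_trans;
//   full_fence();                                 // the dmb ISH above
//   if (safepoint_poll_armed() || thread->suspend_flags != 0)
//     check_special_condition_for_native_trans(thread);   // may block
//   thread->state = _thread_in_Java;              // releasing store (stlrw)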
1902
1903 // change thread state
1904 __ mov(rscratch1, _thread_in_Java);
1905 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1906 __ stlrw(rscratch1, rscratch2);
1907 __ bind(after_transition);
1908
1909 Label reguard;
1910 Label reguard_done;
1911 __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1912 __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1913 __ br(Assembler::EQ, reguard);
1914 __ bind(reguard_done);
1915
1916 // native result if any is live
1917
1918 // Unlock
1919 Label unlock_done;
1920 Label slow_path_unlock;
1921 if (method->is_synchronized()) {
1922
1923 // Get locked oop from the handle we passed to jni
1924 __ ldr(obj_reg, Address(oop_handle_reg, 0));
1925
1926 Label done;
1927
1928 if (UseBiasedLocking) {
1929 __ biased_locking_exit(obj_reg, old_hdr, done);
1930 }
1931
1932 // Simple recursive lock?
1933
1934 __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1935 __ cbz(rscratch1, done);
1936
1937 // Must save r0 if it is live now, because cmpxchg must use it
1938 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1939 save_native_result(masm, ret_type, stack_slots);
1940 }
1941
1942
1943 // get address of the stack lock
1944 __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1945 // get old displaced header
1946 __ ldr(old_hdr, Address(r0, 0));
1947
1948 // Atomic swap old header if oop still contains the stack lock
1949 Label succeed;
1950 __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1951 __ bind(succeed);
1952
1953 // slow path re-enters here
1954 __ bind(unlock_done);
1955 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1956 restore_native_result(masm, ret_type, stack_slots);
1957 }
1958
1959 __ bind(done);
1960 }
1961
1962 Label dtrace_method_exit, dtrace_method_exit_done;
1963 {
1964 uint64_t offset;
1965 __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1966 __ ldrb(rscratch1, Address(rscratch1, offset));
1967 __ cbnzw(rscratch1, dtrace_method_exit);
1968 __ bind(dtrace_method_exit_done);
1969 }
1970
1971 __ reset_last_Java_frame(false);
1972
1973 // Unbox oop result, e.g. JNIHandles::resolve result.
1974 if (is_reference_type(ret_type)) {
1975 __ resolve_jobject(r0, rthread, rscratch2);
1976 }
1977
1978 if (CheckJNICalls) {
1979 // clear_pending_jni_exception_check
1980 __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
1981 }
1982
1983 if (!is_critical_native) {
1984 // reset handle block
1985 __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
1986 __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
1987 }
1988
1989 __ leave();
1990
1991 if (!is_critical_native) {
1992 // Any exception pending?
1993 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1994 __ cbnz(rscratch1, exception_pending);
1995 }
1996
1997 // We're done
1998 __ ret(lr);
1999
2000 // Unexpected paths are out of line and go here
2001
2002 if (!is_critical_native) {
2003 // forward the exception
2004 __ bind(exception_pending);
2005
2006 // and forward the exception
2007 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2008 }
2009
2010 // Slow path locking & unlocking
2011 if (method->is_synchronized()) {
2012
2013 __ block_comment("Slow path lock {");
2014 __ bind(slow_path_lock);
2015
2016 // has last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
2017 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2018
2019 // protect the args we've loaded
2020 save_args(masm, total_c_args, c_arg, out_regs);
2021
2022 __ mov(c_rarg0, obj_reg);
2023 __ mov(c_rarg1, lock_reg);
2024 __ mov(c_rarg2, rthread);
2025
2026 // Not a leaf but we have last_Java_frame setup as we want
2027 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2028 restore_args(masm, total_c_args, c_arg, out_regs);
2029
2030 #ifdef ASSERT
2031 { Label L;
2032 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2033 __ cbz(rscratch1, L);
2034 __ stop("no pending exception allowed on exit from monitorenter");
2035 __ bind(L);
2036 }
2037 #endif
2038 __ b(lock_done);
2039
2040 __ block_comment("} Slow path lock");
2041
2042 __ block_comment("Slow path unlock {");
2043 __ bind(slow_path_unlock);
2044
2045 // If we haven't already saved the native result we must save it now, as the
2046 // floating-point registers are still exposed.
2047
2048 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2049 save_native_result(masm, ret_type, stack_slots);
2050 }
2051
2052 __ mov(c_rarg2, rthread);
2053 __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2054 __ mov(c_rarg0, obj_reg);
2055
2056 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2057 // NOTE that obj_reg == r19 currently
2058 __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2059 __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2060
2061 rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
2062
2063 #ifdef ASSERT
2064 {
2065 Label L;
2066 __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2067 __ cbz(rscratch1, L);
2068 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2069 __ bind(L);
2070 }
2071 #endif /* ASSERT */
2072
2073 __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
2074
2075 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2076 restore_native_result(masm, ret_type, stack_slots);
2077 }
2078 __ b(unlock_done);
2079
2080 __ block_comment("} Slow path unlock");
2081
2082 } // synchronized
2083
2084 // SLOW PATH Reguard the stack if needed
2085
2086 __ bind(reguard);
2087 save_native_result(masm, ret_type, stack_slots);
2088 rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2089 restore_native_result(masm, ret_type, stack_slots);
2090 // and continue
2091 __ b(reguard_done);
2092
2093 // SLOW PATH safepoint
2094 {
2095 __ block_comment("safepoint {");
2096 __ bind(safepoint_in_progress);
2097
2098 // Don't use call_VM, as it will see a possible pending exception and forward it
2099 // and never return here, preventing us from clearing _last_native_pc down below.
2100 //
2101 save_native_result(masm, ret_type, stack_slots);
2102 __ mov(c_rarg0, rthread);
2103 #ifndef PRODUCT
2104 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2105 #endif
2106 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2107 __ blr(rscratch1);
2108
2109 // Restore any method result value
2110 restore_native_result(masm, ret_type, stack_slots);
2111
2112 __ b(safepoint_in_progress_done);
2113 __ block_comment("} safepoint");
2114 }
2115
2116 // SLOW PATH dtrace support
2117 {
2118 __ block_comment("dtrace entry {");
2119 __ bind(dtrace_method_entry);
2120
2121 // We have all of the arguments set up at this point. We must not touch any register
2122 // argument registers at this point (it is safe to save/restore them, since they hold no raw oops).
2123
2124 save_args(masm, total_c_args, c_arg, out_regs);
2125 __ mov_metadata(c_rarg1, method());
2126 __ call_VM_leaf(
2127 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2128 rthread, c_rarg1);
2129 restore_args(masm, total_c_args, c_arg, out_regs);
2130 __ b(dtrace_method_entry_done);
2131 __ block_comment("} dtrace entry");
2132 }
2133
2134 {
2135 __ block_comment("dtrace exit {");
2136 __ bind(dtrace_method_exit);
2137 save_native_result(masm, ret_type, stack_slots);
2138 __ mov_metadata(c_rarg1, method());
2139 __ call_VM_leaf(
2140 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2141 rthread, c_rarg1);
2142 restore_native_result(masm, ret_type, stack_slots);
2143 __ b(dtrace_method_exit_done);
2144 __ block_comment("} dtrace exit");
2145 }
2146
2147
2148 __ flush();
2149
2150 nmethod *nm = nmethod::new_native_nmethod(method,
2151 compile_id,
2152 masm->code(),
2153 vep_offset,
2154 frame_complete,
2155 stack_slots / VMRegImpl::slots_per_word,
2156 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2157 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2158 oop_maps);
2159
2160 return nm;
2161 }
2162
2163 // this function returns the adjusted size (in number of words) of a c2i adapter
2164 // activation for use during deoptimization
2165 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2166 assert(callee_locals >= callee_parameters,
2167 "test and remove; got more parms than locals");
2168 if (callee_locals < callee_parameters)
2169 return 0; // No adjustment for negative locals
2170 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2171 // diff is counted in stack words
2172 return align_up(diff, 2);
2173 }
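// For example (a sketch, assuming Interpreter::stackElementWords == 1):
// callee_parameters == 2 and callee_locals == 5 gives diff == 3 words,
// which align_up rounds to 4 so the adjustment keeps 16-byte alignment.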
2174
2175
2176 //------------------------------generate_deopt_blob----------------------------
2177 void SharedRuntime::generate_deopt_blob() {
2178 // Allocate space for the code
2179 ResourceMark rm;
2180 // Setup code generation tools
2181 int pad = 0;
2182 #if INCLUDE_JVMCI
2183 if (EnableJVMCI) {
2184 pad += 512; // Increase the buffer size when compiling for JVMCI
2185 }
2186 #endif
2187 CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2188 MacroAssembler* masm = new MacroAssembler(&buffer);
2189 int frame_size_in_words;
2190 OopMap* map = NULL;
2191 OopMapSet *oop_maps = new OopMapSet();
2192 RegisterSaver reg_save(COMPILER2_OR_JVMCI != 0);
2193
2194 // -------------
2195 // This code enters when returning to a de-optimized nmethod. A return
2196 // address has been pushed on the stack, and return values are in
2197 // registers.
2198 // If we are doing a normal deopt then we were called from the patched
2199 // nmethod from the point we returned to the nmethod. So the return
2200 // address on the stack is wrong by NativeCall::instruction_size
2201 // We will adjust the value so it looks like we have the original return
2202 // address on the stack (like when we eagerly deoptimized).
2203 // In the case of an exception pending when deoptimizing, we enter
2204 // with a return address on the stack that points after the call we patched
2205 // into the exception handler. We have the following register state from,
2206 // e.g., the forward exception stub (see stubGenerator_aarch64.cpp).
2207 // r0: exception oop
2208 // r19: exception handler
2209 // r3: throwing pc
2210 // So in this case we simply jam r3 into the useless return address and
2211 // the stack looks just like we want.
2212 //
2213 // At this point we need to de-opt. We save the argument return
2214 // registers. We call the first C routine, fetch_unroll_info(). This
2215 // routine captures the return values and returns a structure which
2216 // describes the current frame size and the sizes of all replacement frames.
2217 // The current frame is compiled code and may contain many inlined
2218 // functions, each with their own JVM state. We pop the current frame, then
2219 // push all the new frames. Then we call the C routine unpack_frames() to
2220 // populate these frames. Finally unpack_frames() returns us the new target
2221 // address. Notice that callee-save registers are BLOWN here; they have
2222 // already been captured in the vframeArray at the time the return PC was
2223 // patched.
2224 address start = __ pc();
2225 Label cont;
2226
2227 // Prolog for non exception case!
2228
2229 // Save everything in sight.
2230 map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2231
2232 // Normal deoptimization. Save exec mode for unpack_frames.
2233 __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
2234 __ b(cont);
2235
2236 int reexecute_offset = __ pc() - start;
2237 #if INCLUDE_JVMCI && !defined(COMPILER1)
2238 if (EnableJVMCI && UseJVMCICompiler) {
2239 // JVMCI does not use this kind of deoptimization
2240 __ should_not_reach_here();
2241 }
2242 #endif
2243
2244 // Reexecute case
2245 // return address is the pc that describes what bci to re-execute at
2246
2247 // No need to update map as each call to save_live_registers will produce identical oopmap
2248 (void) reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2249
2250 __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
2251 __ b(cont);
2252
2253 #if INCLUDE_JVMCI
2254 Label after_fetch_unroll_info_call;
2255 int implicit_exception_uncommon_trap_offset = 0;
2256 int uncommon_trap_offset = 0;
2257
2258 if (EnableJVMCI) {
2259 implicit_exception_uncommon_trap_offset = __ pc() - start;
2260
2261 __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2262 __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2263
2264 uncommon_trap_offset = __ pc() - start;
2265
2266 // Save everything in sight.
2267 reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2268 // fetch_unroll_info needs to call last_java_frame()
2269 Label retaddr;
2270 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2271
2272 __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2273 __ movw(rscratch1, -1);
2274 __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
2275
2276 __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
2277 __ mov(c_rarg0, rthread);
2278 __ movw(c_rarg2, rcpool); // exec mode
2279 __ lea(rscratch1,
2280 RuntimeAddress(CAST_FROM_FN_PTR(address,
2281 Deoptimization::uncommon_trap)));
2282 __ blr(rscratch1);
2283 __ bind(retaddr);
2284 oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2285
2286 __ reset_last_Java_frame(false);
2287
2288 __ b(after_fetch_unroll_info_call);
2289 } // EnableJVMCI
2290 #endif // INCLUDE_JVMCI
2291
2292 int exception_offset = __ pc() - start;
2293
2294 // Prolog for exception case
2295
2296 // all registers are dead at this entry point, except for r0, and
2297 // r3 which contain the exception oop and exception pc
2298 // respectively. Set them in TLS and fall thru to the
2299 // unpack_with_exception_in_tls entry point.
2300
2301 __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
2302 __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
2303
2304 int exception_in_tls_offset = __ pc() - start;
2305
2306 // new implementation because exception oop is now passed in JavaThread
2307
2308 // Prolog for exception case
2309 // All registers must be preserved because they might be used by LinearScan
2310 // Exception oop and throwing PC are passed in JavaThread
2311 // tos: stack at point of call to method that threw the exception (i.e. only
2312 // args are on the stack, no return address)
2313
2314 // The return address pushed by save_live_registers will be patched
2315 // later with the throwing pc. The correct value is not available
2316 // now because loading it from memory would destroy registers.
2317
2318 // NB: The SP at this point must be the SP of the method that is
2319 // being deoptimized. Deoptimization assumes that the frame created
2320 // here by save_live_registers is immediately below the method's SP.
2321 // This is a somewhat fragile mechanism.
2322
2323 // Save everything in sight.
2324 map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2325
2326 // Now it is safe to overwrite any register
2327
2328 // Deopt during an exception. Save exec mode for unpack_frames.
2329 __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved
2330
2331 // load throwing pc from JavaThread and patch it as the return address
2332 // of the current frame. Then clear the field in JavaThread
2333
2334 __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2335 __ str(r3, Address(rfp, wordSize));
2336 __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2337
2338 #ifdef ASSERT
2339 // verify that there is really an exception oop in JavaThread
2340 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2341 __ verify_oop(r0);
2342
2343 // verify that there is no pending exception
2344 Label no_pending_exception;
2345 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2346 __ cbz(rscratch1, no_pending_exception);
2347 __ stop("must not have pending exception here");
2348 __ bind(no_pending_exception);
2349 #endif
2350
2351 __ bind(cont);
2352
2353 // Call C code. Need thread and this frame, but NOT official VM entry
2354 // crud. We cannot block on this call, no GC can happen.
2355 //
2356 // UnrollBlock* fetch_unroll_info(JavaThread* thread)
2357
2358 // fetch_unroll_info needs to call last_java_frame().
2359
2360 Label retaddr;
2361 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2362 #ifdef ASSERT0
2363 { Label L;
2364 __ ldr(rscratch1, Address(rthread,
2365 JavaThread::last_Java_fp_offset()));
2366 __ cbz(rscratch1, L);
2367 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
2368 __ bind(L);
2369 }
2370 #endif // ASSERT0
2371 __ mov(c_rarg0, rthread);
2372 __ mov(c_rarg1, rcpool);
2373 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2374 __ blr(rscratch1);
2375 __ bind(retaddr);
2376
2377 // Need to have an oopmap that tells fetch_unroll_info where to
2378 // find any register it might need.
2379 oop_maps->add_gc_map(__ pc() - start, map);
2380
2381 __ reset_last_Java_frame(false);
2382
2383 #if INCLUDE_JVMCI
2384 if (EnableJVMCI) {
2385 __ bind(after_fetch_unroll_info_call);
2386 }
2387 #endif
2388
2389 // Load UnrollBlock* into r5
2390 __ mov(r5, r0);
2391
2392 __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2393 Label noException;
2394 __ cmpw(rcpool, Deoptimization::Unpack_exception); // Was exception pending?
2395 __ br(Assembler::NE, noException);
2396 __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
2397 // QQQ this is useless it was NULL above
2398 __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
2399 __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2400 __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2401
2402 __ verify_oop(r0);
2403
2404 // Overwrite the result registers with the exception results.
2405 __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2406 // I think this is useless
2407 // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2408
2409 __ bind(noException);
2410
2411 // Only register save data is on the stack.
2412 // Now restore the result registers. Everything else is either dead
2413 // or captured in the vframeArray.
2414
2415 // Restore fp result register
2416 __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2417 // Restore integer result register
2418 __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2419
2420 // Pop all of the register save area off the stack
2421 __ add(sp, sp, frame_size_in_words * wordSize);
2422
2423 // All of the register save area has been popped off the stack. Only the
2424 // return address remains.
2425
2426 // Pop all the frames we must move/replace.
2427 //
2428 // Frame picture (youngest to oldest)
2429 // 1: self-frame (no frame link)
2430 // 2: deopting frame (no frame link)
2431 // 3: caller of deopting frame (could be compiled/interpreted).
2432 //
2433 // Note: by leaving the return address of self-frame on the stack
2434 // and using the size of frame 2 to adjust the stack
2435 // when we are done the return to frame 3 will still be on the stack.
2436
2437 // Pop deoptimized frame
2438 __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2439 __ sub(r2, r2, 2 * wordSize);
2440 __ add(sp, sp, r2);
2441 __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
2442 // LR should now be the return address to the caller (3)
2443
2444 #ifdef ASSERT
2445 // Compilers generate code that bangs the stack by as much as the
2446 // interpreter would need. So this stack banging should never
2447 // trigger a fault. Verify that it does not on non product builds.
2448 __ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2449 __ bang_stack_size(r19, r2);
2450 #endif
2451 // Load address of array of frame pcs into r2
2452 __ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2453
2454 // Trash the old pc
2455 // __ addptr(sp, wordSize); FIXME ????
2456
2457 // Load address of array of frame sizes into r4
2458 __ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2459
2460 // Load counter into r3
2461 __ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2462
2463 // Now adjust the caller's stack to make up for the extra locals
2464 // but record the original sp so that we can save it in the skeletal interpreter
2465 // frame and the stack walking of interpreter_sender will get the unextended sp
2466 // value and not the "real" sp value.
2467
2468 const Register sender_sp = r6;
2469
2470 __ mov(sender_sp, sp);
2471 __ ldrw(r19, Address(r5,
2472 Deoptimization::UnrollBlock::
2473 caller_adjustment_offset_in_bytes()));
2474 __ sub(sp, sp, r19);
2475
2476 // Push interpreter frames in a loop
2477 __ mov(rscratch1, (uint64_t)0xDEADDEAD); // Make a recognizable pattern
2478 __ mov(rscratch2, rscratch1);
2479 Label loop;
2480 __ bind(loop);
2481 __ ldr(r19, Address(__ post(r4, wordSize))); // Load frame size
2482 __ sub(r19, r19, 2*wordSize); // We'll push pc and fp by hand
2483 __ ldr(lr, Address(__ post(r2, wordSize))); // Load pc
2484 __ enter(); // Save old & set new fp
2485 __ sub(sp, sp, r19); // Prolog
2486 // This value is corrected by layout_activation_impl
2487 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2488 __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2489 __ mov(sender_sp, sp); // Pass sender_sp to next frame
2490 __ sub(r3, r3, 1); // Decrement counter
2491 __ cbnz(r3, loop);
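// The loop above, as a rough sketch (hypothetical names; r4 walks the
// frame-size array, r2 the frame-pc array):
//   for (int n = number_of_frames; n != 0; n--) {
//     size_t size = *frame_sizes++;
//     lr = *frame_pcs++;
//     push(rfp, lr); rfp = sp;                  // enter()
//     sp -= size - 2 * wordSize;                // pc/fp already pushed
//     fp[last_sp_offset] = 0;                   // fixed by layout_activation
//     fp[sender_sp_offset] = sender_sp;         // make the frame walkable
//     sender_sp = sp;
//   }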
2492
2493 // Re-push self-frame
2494 __ ldr(lr, Address(r2));
2495 __ enter();
2496
2497 // Allocate a full sized register save area. We subtract 2 because
2498 // enter() just pushed 2 words
2499 __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);
2500
2501 // Restore frame locals after moving the frame
2502 __ strd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2503 __ str(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2504
2505 // Call C code. Need thread but NOT official VM entry
2506 // crud. We cannot block on this call, no GC can happen. Call should
2507 // restore return values to their stack-slots with the new SP.
2508 //
2509 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
2510
2511 // Use rfp because the frames look interpreted now
2512 // Don't need the precise return PC here, just precise enough to point into this code blob.
2513 address the_pc = __ pc();
2514 __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2515
2516 __ mov(c_rarg0, rthread);
2517 __ movw(c_rarg1, rcpool); // second arg: exec_mode
2518 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2519 __ blr(rscratch1);
2520
2521 // Set an oopmap for the call site
2522 // Use the same PC we used for the last java frame
2523 oop_maps->add_gc_map(the_pc - start,
2524 new OopMap( frame_size_in_words, 0 ));
2525
2526 // Clear fp AND pc
2527 __ reset_last_Java_frame(true);
2528
2529 // Collect return values
2530 __ ldrd(v0, Address(sp, reg_save.v0_offset_in_bytes()));
2531 __ ldr(r0, Address(sp, reg_save.r0_offset_in_bytes()));
2532 // I think this is useless (throwing pc?)
2533 // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));
2534
2535 // Pop self-frame.
2536 __ leave(); // Epilog
2537
2538 // Jump to interpreter
2539 __ ret(lr);
2540
2541 // Make sure all code is generated
2542 masm->flush();
2543
2544 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2545 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2546 #if INCLUDE_JVMCI
2547 if (EnableJVMCI) {
2548 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2549 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2550 }
2551 #endif
2552 }
2553
2554 // Number of stack slots between incoming argument block and the start of
2555 // a new frame. The PROLOG must add this many slots to the stack. The
2556 // EPILOG must remove this many slots. aarch64 needs two words (four
2557 // slots) for the return address and fp.
2558 // TODO think this is correct but check
2559 uint SharedRuntime::in_preserve_stack_slots() {
2560 return 4;
2561 }
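// Sanity check (a sketch): lr and rfp are two 8-byte words, and VMReg stack
// slots are 4 bytes, so 2 * (wordSize / VMRegImpl::stack_slot_size) == 4.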
2562
2563 uint SharedRuntime::out_preserve_stack_slots() {
2564 return 0;
2565 }
2566
2567 #ifdef COMPILER2
2568 //------------------------------generate_uncommon_trap_blob--------------------
2569 void SharedRuntime::generate_uncommon_trap_blob() {
2570 // Allocate space for the code
2571 ResourceMark rm;
2572 // Setup code generation tools
2573 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2574 MacroAssembler* masm = new MacroAssembler(&buffer);
2575
2576 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2577
2578 address start = __ pc();
2579
2580 // Push self-frame. We get here with a return address in LR
2581 // and sp should be 16 byte aligned
2582 // push rfp and retaddr by hand
2583 __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2584 // we don't expect an arg reg save area
2585 #ifndef PRODUCT
2586 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
2587 #endif
2588 // compiler left unloaded_class_index in j_rarg0; move it to where the
2589 // runtime expects it.
2590 if (c_rarg1 != j_rarg0) {
2591 __ movw(c_rarg1, j_rarg0);
2592 }
2593
2594 // we need to set the last Java SP to the stack pointer of the stub frame
2595 // and the pc to the address where this runtime call will return
2596 // (although actually any pc in this code blob will do).
2597 Label retaddr;
2598 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2599
2600 // Call C code. Need thread but NOT official VM entry
2601 // crud. We cannot block on this call, no GC can happen. Call should
2602 // capture callee-saved registers as well as return values.
2603 // The thread argument is passed in c_rarg0 below.
2604 //
2605 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
2606 //
2607 // n.b. 2 gp args, 0 fp args, integral return type
2608
2609 __ mov(c_rarg0, rthread);
2610 __ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
2611 __ lea(rscratch1,
2612 RuntimeAddress(CAST_FROM_FN_PTR(address,
2613 Deoptimization::uncommon_trap)));
2614 __ blr(rscratch1);
2615 __ bind(retaddr);
2616
2617 // Set an oopmap for the call site
2618 OopMapSet* oop_maps = new OopMapSet();
2619 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
2620
2621 // location of rfp is known implicitly by the frame sender code
2622
2623 oop_maps->add_gc_map(__ pc() - start, map);
2624
2625 __ reset_last_Java_frame(false);
2626
2627 // move UnrollBlock* into r4
2628 __ mov(r4, r0);
2629
2630 #ifdef ASSERT
2631 { Label L;
2632 __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
2633 __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2634 __ br(Assembler::EQ, L);
2635 __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
2636 __ bind(L);
2637 }
2638 #endif
2639
2640 // Pop all the frames we must move/replace.
2641 //
2642 // Frame picture (youngest to oldest)
2643 // 1: self-frame (no frame link)
2644 // 2: deopting frame (no frame link)
2645 // 3: caller of deopting frame (could be compiled/interpreted).
2646
2647 // Pop self-frame. We have no frame, and must rely only on r0 and sp.
2648 __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
2649
2650 // Pop deoptimized frame (int)
2651 __ ldrw(r2, Address(r4,
2652 Deoptimization::UnrollBlock::
2653 size_of_deoptimized_frame_offset_in_bytes()));
2654 __ sub(r2, r2, 2 * wordSize);
2655 __ add(sp, sp, r2);
2656 __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
2657 // LR should now be the return address to the caller (3) frame
2658
2659 #ifdef ASSERT
2660 // Compilers generate code that bangs the stack by as much as the
2661 // interpreter would need. So this stack banging should never
2662 // trigger a fault. Verify that it does not on non product builds.
2663 __ ldrw(r1, Address(r4,
2664 Deoptimization::UnrollBlock::
2665 total_frame_sizes_offset_in_bytes()));
2666 __ bang_stack_size(r1, r2);
2667 #endif
2668
2669 // Load address of array of frame pcs into r2 (address*)
2670 __ ldr(r2, Address(r4,
2671 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2672
2673 // Load address of array of frame sizes into r5 (intptr_t*)
2674 __ ldr(r5, Address(r4,
2675 Deoptimization::UnrollBlock::
2676 frame_sizes_offset_in_bytes()));
2677
2678 // Counter
2679 __ ldrw(r3, Address(r4,
2680 Deoptimization::UnrollBlock::
2681 number_of_frames_offset_in_bytes())); // (int)
2682
2683 // Now adjust the caller's stack to make up for the extra locals but
2684 // record the original sp so that we can save it in the skeletal
2685 // interpreter frame and the stack walking of interpreter_sender
2686 // will get the unextended sp value and not the "real" sp value.
2687
2688 const Register sender_sp = r8;
2689
2690 __ mov(sender_sp, sp);
2691 __ ldrw(r1, Address(r4,
2692 Deoptimization::UnrollBlock::
2693 caller_adjustment_offset_in_bytes())); // (int)
2694 __ sub(sp, sp, r1);
2695
2696 // Push interpreter frames in a loop
2697 Label loop;
2698 __ bind(loop);
2699 __ ldr(r1, Address(r5, 0)); // Load frame size
2700 __ sub(r1, r1, 2 * wordSize); // We'll push pc and rfp by hand
2701 __ ldr(lr, Address(r2, 0)); // Save return address
2702 __ enter(); // and old rfp & set new rfp
2703 __ sub(sp, sp, r1); // Prolog
2704 __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
2705 // This value is corrected by layout_activation_impl
2706 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
2707 __ mov(sender_sp, sp); // Pass sender_sp to next frame
2708 __ add(r5, r5, wordSize); // Bump array pointer (sizes)
2709 __ add(r2, r2, wordSize); // Bump array pointer (pcs)
2710 __ subsw(r3, r3, 1); // Decrement counter
2711 __ br(Assembler::GT, loop);
2712 __ ldr(lr, Address(r2, 0)); // save final return address
2713 // Re-push self-frame
2714 __ enter(); // & old rfp & set new rfp
2715
2716 // Use rfp because the frames look interpreted now
2717 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
2718 // Don't need the precise return PC here, just precise enough to point into this code blob.
2719 address the_pc = __ pc();
2720 __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
2721
2722 // Call C code. Need thread but NOT official VM entry
2723 // crud. We cannot block on this call, no GC can happen. Call should
2724 // restore return values to their stack-slots with the new SP.
2725 // The thread argument is passed in c_rarg0 below.
2726 //
2727 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
2728 //
2729 // n.b. 2 gp args, 0 fp args, integral return type
2730
2731 // sp should already be aligned
2732 __ mov(c_rarg0, rthread);
2733 __ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
2734 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2735 __ blr(rscratch1);
2736
2737 // Set an oopmap for the call site
2738 // Use the same PC we used for the last java frame
2739 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
2740
2741 // Clear fp AND pc
2742 __ reset_last_Java_frame(true);
2743
2744 // Pop self-frame.
2745 __ leave(); // Epilog
2746
2747 // Jump to interpreter
2748 __ ret(lr);
2749
2750 // Make sure all code is generated
2751 masm->flush();
2752
2753 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
2754 SimpleRuntimeFrame::framesize >> 1);
2755 }
2756 #endif // COMPILER2
2757
2758
2759 //------------------------------generate_handler_blob------
2760 //
2761 // Generate a special Compile2Runtime blob that saves all registers,
2762 // and setup oopmap.
2763 //
2764 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
2765 ResourceMark rm;
2766 OopMapSet *oop_maps = new OopMapSet();
2767 OopMap* map;
2768
2769 // Allocate space for the code. Setup code generation tools.
2770 CodeBuffer buffer("handler_blob", 2048, 1024);
2771 MacroAssembler* masm = new MacroAssembler(&buffer);
2772
2773 address start = __ pc();
2774 address call_pc = NULL;
2775 int frame_size_in_words;
2776 bool cause_return = (poll_type == POLL_AT_RETURN);
2777 RegisterSaver reg_save(poll_type == POLL_AT_VECTOR_LOOP /* save_vectors */);
2778
2779 // Save Integer and Float registers.
2780 map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2781
2782 // The following is basically a call_VM. However, we need the precise
2783 // address of the call in order to generate an oopmap. Hence, we do all the
2784 // work ourselves.
2785
2786 Label retaddr;
2787 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2788
2789 // The return address must always be correct so that the frame constructor
2790 // never sees an invalid pc.
2791
2792 if (!cause_return) {
2793 // overwrite the return address pushed by save_live_registers
2794 // Additionally, r20 is a callee-saved register so we can look at
2795 // it later to determine if someone changed the return address for
2796 // us!
2797 __ ldr(r20, Address(rthread, JavaThread::saved_exception_pc_offset()));
2798 __ str(r20, Address(rfp, wordSize));
2799 }
2800
2801 // Do the call
2802 __ mov(c_rarg0, rthread);
2803 __ lea(rscratch1, RuntimeAddress(call_ptr));
2804 __ blr(rscratch1);
2805 __ bind(retaddr);
2806
2807 // Set an oopmap for the call site. This oopmap will map all
2808 // oop-registers and debug-info registers as callee-saved. This
2809 // will allow deoptimization at this safepoint to find all possible
2810 // debug-info recordings, as well as let GC find all oops.
2811
2812 oop_maps->add_gc_map( __ pc() - start, map);
2813
2814 Label noException;
2815
2816 __ reset_last_Java_frame(false);
2817
2818 __ membar(Assembler::LoadLoad | Assembler::LoadStore);
2819
2820 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2821 __ cbz(rscratch1, noException);
2822
2823 // Exception pending
2824
2825 reg_save.restore_live_registers(masm);
2826
2827 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2828
2829 // No exception case
2830 __ bind(noException);
2831
2832 Label no_adjust, bail;
2833 if (!cause_return) {
2834 // If our stashed return pc was modified by the runtime we avoid touching it
2835 __ ldr(rscratch1, Address(rfp, wordSize));
2836 __ cmp(r20, rscratch1);
2837 __ br(Assembler::NE, no_adjust);
2838
2839 #ifdef ASSERT
2840 // Verify the correct encoding of the poll we're about to skip.
2841 // See NativeInstruction::is_ldrw_to_zr()
2842 __ ldrw(rscratch1, Address(r20));
2843 __ ubfx(rscratch2, rscratch1, 22, 10);
2844 __ cmpw(rscratch2, 0b1011100101);
2845 __ br(Assembler::NE, bail);
2846 __ ubfx(rscratch2, rscratch1, 0, 5);
2847 __ cmpw(rscratch2, 0b11111);
2848 __ br(Assembler::NE, bail);
2849 #endif
2850 // Adjust return pc forward to step over the safepoint poll instruction
2851 __ add(r20, r20, NativeInstruction::instruction_size);
2852 __ str(r20, Address(rfp, wordSize));
2853 }
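// The adjustment, roughly (a sketch; the poll is one 4-byte instruction):
//   if (*(rfp + wordSize) == r20)    // runtime left the stashed pc alone
//     *(rfp + wordSize) = r20 + NativeInstruction::instruction_size;
// so the interrupted thread resumes just past its "ldr wzr, [rX]" poll.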
2854
2855 __ bind(no_adjust);
2856 // Normal exit, restore registers and exit.
2857 reg_save.restore_live_registers(masm);
2858
2859 __ ret(lr);
2860
2861 #ifdef ASSERT
2862 __ bind(bail);
2863 __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
2864 #endif
2865
2866 // Make sure all code is generated
2867 masm->flush();
2868
2869 // Fill-out other meta info
2870 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2871 }
2872
2873 //
2874 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2875 //
2876 // Generate a stub that calls into vm to find out the proper destination
2877 // of a java call. All the argument registers are live at this point
2878 // but since this is generic code we don't know what they are and the caller
2879 // must do any gc of the args.
2880 //
2881 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
2882 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2883
2884 // allocate space for the code
2885 ResourceMark rm;
2886
2887 CodeBuffer buffer(name, 1000, 512);
2888 MacroAssembler* masm = new MacroAssembler(&buffer);
2889
2890 int frame_size_in_words;
2891 RegisterSaver reg_save(false /* save_vectors */);
2892
2893 OopMapSet *oop_maps = new OopMapSet();
2894 OopMap* map = NULL;
2895
2896 int start = __ offset();
2897
2898 map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
2899
2900 int frame_complete = __ offset();
2901
2902 {
2903 Label retaddr;
2904 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
2905
2906 __ mov(c_rarg0, rthread);
2907 __ lea(rscratch1, RuntimeAddress(destination));
2908
2909 __ blr(rscratch1);
2910 __ bind(retaddr);
2911 }
2912
2913 // Set an oopmap for the call site.
2914 // We need this not only for callee-saved registers, but also for volatile
2915 // registers that the compiler might be keeping live across a safepoint.
2916
2917 oop_maps->add_gc_map( __ offset() - start, map);
2918
2919 // r0 contains the address we are going to jump to assuming no exception got installed
2920
2921 // clear last_Java_sp
2922 __ reset_last_Java_frame(false);
2923 // check for pending exceptions
2924 Label pending;
2925 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
2926 __ cbnz(rscratch1, pending);
2927
2928 // get the returned Method*
2929 __ get_vm_result_2(rmethod, rthread);
2930 __ str(rmethod, Address(sp, reg_save.reg_offset_in_bytes(rmethod)));
2931
2932 // r0 is where we want to jump, overwrite rscratch1 which is saved and scratch
2933 __ str(r0, Address(sp, reg_save.rscratch1_offset_in_bytes()));
2934 reg_save.restore_live_registers(masm);
2935
2936 // We are back to the original state on entry and ready to go.
2937
2938 __ br(rscratch1);
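// To recap the dance above: the resolver returns the code entry in r0 and
// the Method* via vm_result_2. Both are written into their register save
// slots, so restore_live_registers reloads rmethod and rscratch1 with the
// resolved values just before we branch.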
2939
2940 // Pending exception after the safepoint
2941
2942 __ bind(pending);
2943
2944 reg_save.restore_live_registers(masm);
2945
2946 // exception pending => remove activation and forward to exception handler
2947
2948 __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
2949
2950 __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2951 __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2952
2953 // -------------
2954 // make sure all code is generated
2955 masm->flush();
2956
2957 // return the blob
2958 // frame_size_words or bytes??
2959 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2960 }
2961
2962 #ifdef COMPILER2
2963 // This is here instead of runtime_aarch64.cpp because it uses SimpleRuntimeFrame
2964 //
2965 //------------------------------generate_exception_blob---------------------------
2966 // creates exception blob at the end
2967 // Using exception blob, this code is jumped from a compiled method.
2968 // (see emit_exception_handler in aarch64.ad file)
2969 //
2970 // Given an exception pc at a call we call into the runtime for the
2971 // handler in this method. This handler might merely restore state
2972 // (i.e. callee save registers) unwind the frame and jump to the
2973 // exception handler for the nmethod if there is no Java level handler
2974 // for the nmethod.
2975 //
2976 // This code is entered with a jmp.
2977 //
2978 // Arguments:
2979 // r0: exception oop
2980 // r3: exception pc
2981 //
2982 // Results:
2983 // r0: exception oop
2984 // r3: exception pc in caller or ???
2985 // destination: exception handler of caller
2986 //
2987 // Note: the exception pc MUST be at a call (precise debug information)
2988 // Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
2989 //
2990
2991 void OptoRuntime::generate_exception_blob() {
2992 assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
2993 assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
2994 assert(!OptoRuntime::is_callee_saved_register(R2_num), "");
2995
2996 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
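  // n.b. framesize is counted in 4-byte stack slots, so framesize % 4 == 0
  // means the frame is a multiple of 16 bytes, as AArch64 requires of sp.
  // Here framesize == 4 (saved rfp plus return address), i.e. 16 bytes.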

  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("exception_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  // TODO: check the various assumptions made here and make sure they
  // hold before relying on this code

  address start = __ pc();

  // push rfp and retaddr by hand
  // Exception pc is 'return address' for stack walker
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // there are no callee save registers and we don't expect an
  // arg reg save area
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumptions
  // about the size of the frame in which the exception happened.
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

  // This call does all the hard work. It checks if an exception handler
  // exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  // address OptoRuntime::handle_exception_C(JavaThread* thread)
  //
  // n.b. 1 gp arg, 0 fp args, integral return type

  // the stack should always be aligned
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
  __ blr(rscratch1);
  // handle_exception_C is a special VM call which does not require an explicit
  // instruction sync afterwards.

  // May jump to SVE compiled code
  __ reinitialize_ptrue();
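  // n.b. SVE-compiled C2 code keeps an all-true predicate in p7 and assumes
  // it is preserved; the runtime call above may have clobbered it, so it is
  // re-established here before we can safely jump to SVE compiled code.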

  // Set an oopmap for the call site. This oopmap will only be used if we
  // are unwinding the stack. Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.

  OopMapSet* oop_maps = new OopMapSet();

  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  __ reset_last_Java_frame(false);

  // Restore callee-saved registers

  // rfp is an implicitly saved callee-saved register (i.e. the calling
  // convention will save/restore it in prolog/epilog). Other than that
  // there are no callee-save registers now that adapter frames are gone,
  // and we don't expect an arg reg save area.
  __ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));

  // r0: exception handler

  // We have a handler in r0 (could be deopt blob).
  __ mov(r8, r0);

  // Get the exception oop
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));

  // r0: exception oop
  // r8: exception handler
  // r4: exception pc
  // Jump to handler

  __ br(r8);

  // Make sure all code is generated
  masm->flush();

  // Set exception blob
  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
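  // n.b. ExceptionBlob::create takes the frame size in 8-byte words;
  // framesize is in 4-byte slots, hence the >> 1.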
}

// ---------------------------------------------------------------

class NativeInvokerGenerator : public StubCodeGenerator {
  address _call_target;
  int _shadow_space_bytes;

  const GrowableArray<VMReg>& _input_registers;
  const GrowableArray<VMReg>& _output_registers;

  int _frame_complete;
  int _framesize;
  OopMapSet* _oop_maps;
public:
  NativeInvokerGenerator(CodeBuffer* buffer,
                         address call_target,
                         int shadow_space_bytes,
                         const GrowableArray<VMReg>& input_registers,
                         const GrowableArray<VMReg>& output_registers)
   : StubCodeGenerator(buffer, PrintMethodHandleStubs),
     _call_target(call_target),
     _shadow_space_bytes(shadow_space_bytes),
     _input_registers(input_registers),
     _output_registers(output_registers),
     _frame_complete(0),
     _framesize(0),
     _oop_maps(NULL) {
    assert(_output_registers.length() <= 1
           || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
  }

  void generate();

  int spill_size_in_bytes() const {
    if (_output_registers.length() == 0) {
      return 0;
    }
    VMReg reg = _output_registers.at(0);
    assert(reg->is_reg(), "must be a register");
    if (reg->is_Register()) {
      return 8;   // a general-purpose register spills as one 64-bit word
    } else if (reg->is_FloatRegister()) {
      bool use_sve = Matcher::supports_scalable_vector();
      if (use_sve) {
        return Matcher::scalable_vector_reg_size(T_BYTE);
      }
      return 16;  // a NEON Q register is 128 bits wide
    } else {
      ShouldNotReachHere();
    }
    return 0;
  }

  void spill_output_registers() {
    if (_output_registers.length() == 0) {
      return;
    }
    VMReg reg = _output_registers.at(0);
    assert(reg->is_reg(), "must be a register");
    MacroAssembler* masm = _masm;
    if (reg->is_Register()) {
      __ spill(reg->as_Register(), true, 0);
    } else if (reg->is_FloatRegister()) {
      bool use_sve = Matcher::supports_scalable_vector();
      if (use_sve) {
        __ spill_sve_vector(reg->as_FloatRegister(), 0, Matcher::scalable_vector_reg_size(T_BYTE));
      } else {
        __ spill(reg->as_FloatRegister(), __ Q, 0);
      }
    } else {
      ShouldNotReachHere();
    }
  }

  void fill_output_registers() {
    if (_output_registers.length() == 0) {
      return;
    }
    VMReg reg = _output_registers.at(0);
    assert(reg->is_reg(), "must be a register");
    MacroAssembler* masm = _masm;
    if (reg->is_Register()) {
      __ unspill(reg->as_Register(), true, 0);
    } else if (reg->is_FloatRegister()) {
      bool use_sve = Matcher::supports_scalable_vector();
      if (use_sve) {
        __ unspill_sve_vector(reg->as_FloatRegister(), 0, Matcher::scalable_vector_reg_size(T_BYTE));
      } else {
        __ unspill(reg->as_FloatRegister(), __ Q, 0);
      }
    } else {
      ShouldNotReachHere();
    }
  }

  int frame_complete() const {
    return _frame_complete;
  }

  int framesize() const {
    return (_framesize >> (LogBytesPerWord - LogBytesPerInt));
  }
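  // n.b. _framesize is kept in 4-byte slots; LogBytesPerWord - LogBytesPerInt
  // is 1 on a 64-bit VM, so this converts slots to the 8-byte words that
  // RuntimeStub::new_runtime_stub expects.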

  OopMapSet* oop_maps() const {
    return _oop_maps;
  }

private:
#ifdef ASSERT
  bool target_uses_register(VMReg reg) {
    return _input_registers.contains(reg) || _output_registers.contains(reg);
  }
#endif
};

static const int native_invoker_code_size = 1024;

RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
                                                int shadow_space_bytes,
                                                const GrowableArray<VMReg>& input_registers,
                                                const GrowableArray<VMReg>& output_registers) {
  int locs_size = 64;
  CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
  NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers);
  g.generate();
  code.log_section_sizes("nep_invoker_blob");

  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub("nep_invoker_blob",
                                  &code,
                                  g.frame_complete(),
                                  g.framesize(),
                                  g.oop_maps(), false);
  return stub;
}

void NativeInvokerGenerator::generate() {
  assert(!(target_uses_register(rscratch1->as_VMReg())
           || target_uses_register(rscratch2->as_VMReg())
           || target_uses_register(rthread->as_VMReg())),
         "Register conflict");

  enum layout {
    rbp_off,
    rbp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  assert(_shadow_space_bytes == 0, "not expecting shadow space on AArch64");
  _framesize = align_up(framesize + (spill_size_in_bytes() >> LogBytesPerInt), 4);
  assert(is_even(_framesize/2), "sp not 16-byte aligned");
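  // n.b. all quantities here are in 4-byte slots. For example, with a
  // Q-register spill: framesize == 4 and spill_size_in_bytes() == 16,
  // i.e. 4 more slots, so _framesize == align_up(4 + 4, 4) == 8 slots
  // == 32 bytes.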

  _oop_maps = new OopMapSet();
  MacroAssembler* masm = _masm;

  address start = __ pc();

  __ enter();

  // lr and fp are already in place
  __ sub(sp, rfp, ((unsigned)_framesize-4) << LogBytesPerInt); // prolog
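  // n.b. enter() pushed fp and lr, which account for 4 of the _framesize
  // slots, and left rfp pointing at the saved fp; dropping sp by the
  // remaining (_framesize - 4) slots' worth of bytes completes the frame.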

  _frame_complete = __ pc() - start;

  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
  OopMap* map = new OopMap(_framesize, 0);
  _oop_maps->add_gc_map(the_pc - start, map);

  // State transition
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
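  // n.b. stlrw is a store-release: the transition to _thread_in_native
  // becomes visible to other threads only after all preceding stores (in
  // particular the last_Java_frame setup above) are visible.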

  rt_call(masm, _call_target);

  __ mov(rscratch1, _thread_in_native_trans);
  __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  __ membar(Assembler::LoadLoad | Assembler::LoadStore |
            Assembler::StoreLoad | Assembler::StoreStore);
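  // n.b. the critical ordering here is StoreLoad: the write of
  // _thread_in_native_trans must be visible before we read the safepoint
  // poll word below; the full fence used here subsumes it.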

  __ verify_sve_vector_length();

  Label L_after_safepoint_poll;
  Label L_safepoint_poll_slow_path;

  __ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */);

  __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
  __ cbnzw(rscratch1, L_safepoint_poll_slow_path);
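  // n.b. either a pending safepoint or a suspend request routes us to the
  // slow path, which calls check_special_condition_for_native_trans and
  // then branches back to L_after_safepoint_poll to complete the
  // transition back to Java.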

  __ bind(L_after_safepoint_poll);

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  __ block_comment("reguard stack check");
  Label L_reguard;
  Label L_after_reguard;
  __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
  __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
  __ br(Assembler::EQ, L_reguard);
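  // n.b. if a stack overflow disabled the yellow/reserved guard pages while
  // we were in native code, they must be re-enabled before returning to
  // Java; the L_reguard slow path below calls
  // SharedRuntime::reguard_yellow_pages to do so.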
  __ bind(L_after_reguard);

  __ reset_last_Java_frame(true);

  __ leave(); // required for proper stackwalking of RuntimeStub frame
  __ ret(lr);

  //////////////////////////////////////////////////////////////////////////////

  __ block_comment("{ L_safepoint_poll_slow_path");
  __ bind(L_safepoint_poll_slow_path);

  // Need to save the native result registers around any runtime calls.
  spill_output_registers();

  __ mov(c_rarg0, rthread);
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
  __ blr(rscratch1);

  fill_output_registers();

  __ b(L_after_safepoint_poll);
  __ block_comment("} L_safepoint_poll_slow_path");

  //////////////////////////////////////////////////////////////////////////////

  __ block_comment("{ L_reguard");
  __ bind(L_reguard);

  spill_output_registers();

  rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));

  fill_output_registers();

  __ b(L_after_reguard);

  __ block_comment("} L_reguard");

  //////////////////////////////////////////////////////////////////////////////

  __ flush();
}
#endif // COMPILER2
