/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/preserveException.hpp"

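// Assembler shorthand used throughout this file: "__" routes each
// pseudo-instruction through the current MacroAssembler, and
// BLOCK_COMMENT/STOP degrade to cheaper forms in PRODUCT builds.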
#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) block_comment(error); __ stop(error)
#endif

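// BIND places a label and, in non-product builds, also emits a block
// comment naming it in the disassembly.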
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
static RegisterOrConstant constant(int value) {
  return RegisterOrConstant(value);
}

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class), temp_reg, temp2_reg,
                 "MH argument is a Class");
  __ ld_ptr(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg);
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, "%s should be nonzero", xname);
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj_reg, SystemDictionary::WKID klass_id,
                                 Register temp_reg, Register temp2_reg,
                                 const char* error_message) {
  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  Klass* klass = SystemDictionary::well_known_klass(klass_id);
  bool did_save = false;
  if (temp_reg == noreg || temp2_reg == noreg) {
    temp_reg  = L1;
    temp2_reg = L2;
    __ save_frame_and_mov(0, obj_reg, L0);
    obj_reg = L0;
    did_save = true;
  }
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj_reg);
  __ br_null_short(obj_reg, Assembler::pn, L_bad);
  __ load_klass(obj_reg, temp_reg);
  __ set(ExternalAddress((Metadata**)klass_addr), temp2_reg);
  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
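  // Not an exact match; fall back to the fast-subtype-check slot: for a
  // primary superclass, klass->super_check_offset() names the offset in the
  // receiver's Klass where a pointer to 'klass' must appear if it is a super.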
  intptr_t super_check_offset = klass->super_check_offset();
  __ ld_ptr(Address(temp_reg, super_check_offset), temp_reg);
  __ set(ExternalAddress((Metadata**)klass_addr), temp2_reg);
  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
  __ BIND(L_bad);
  if (did_save)  __ restore();
  __ STOP(error_message);
  __ BIND(L_ok);
  if (did_save)  __ restore();
  BLOCK_COMMENT("} verify_klass");
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
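  // Extract the reference kind from the packed MemberName.flags word and
  // compare it against the expected JVM_REF_* constant.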
  __ lduw(Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())), temp);
  __ srl( temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT, temp);
  __ and3(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK, temp);
  __ cmp_and_br_short(temp, ref_kind, Assembler::equal, Assembler::pt, L);
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ STOP(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ bind(L);
}

#endif // ASSERT

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                            bool for_compiler_entry) {
  Label L_no_such_method;
  assert(method == G5_method, "interpreter calling convention");
  assert_different_registers(method, target, temp);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    __ verify_thread();
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    __ ld(interp_only, temp);
    __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code);
    // Null method test is replicated below in compiled case,
    // it might be able to address across the verify_thread()
    __ br_null_short(G5_method, Assembler::pn, L_no_such_method);
    __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    __ jmp(target, 0);
    __ delayed()->nop();
    __ BIND(run_compiled_code);
    // Note: we could fill some delay slots here, but
    // it doesn't matter, since this is interpreter code.
  }

  // Compiled case, either static or fall-through from runtime conditional
  __ br_null_short(G5_method, Assembler::pn, L_no_such_method);

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ ld_ptr(G5_method, in_bytes(entry_offset), target);
  __ jmp(target, 0);
  __ delayed()->nop();

  __ bind(L_no_such_method);
  AddressLiteral ame(StubRoutines::throw_AbstractMethodError_entry());
  __ jump_to(ame, temp);
  __ delayed()->nop();
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2, Register temp3,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);  // temp3 is only passed on
  assert(method_temp == G5_method, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry (MemberName)
  //                     -> MemberName.method -> ResolvedMethodName.vmtarget (Method*)
  __ verify_oop(recv);
  __ load_heap_oop(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), method_temp, temp2);
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp, temp2);
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), method_temp, temp2);
  __ verify_oop(method_temp);
  __ ld_ptr(Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())), method_temp);
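  // method_temp now holds the raw Method* to invoke.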

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ ld_ptr(method_temp, in_bytes(Method::const_offset()), temp2);
    __ load_sized_value(Address(temp2, ConstMethod::size_of_parameters_offset()),
                        temp2,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ ld_ptr(__ argument_address(temp2, temp2, -1), temp2);
    __ cmp_and_br_short(temp2, recv, Assembler::equal, Assembler::pt, L);
    __ STOP("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ should_not_reach_here();           // empty stubs make SG sick
    return NULL;
  }

  // I5_savedSP/O5_savedSP: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // G5_method:  Method*
  // G4 (Gargs): incoming argument list (must preserve)
  // O0: used as temp to hold mh or receiver
  // O1, O4: garbage temps, blown away
  Register O1_scratch    = O1;
  Register O4_param_size = O4;   // size of parameters

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ lduh(Address(G5_method, Method::intrinsic_id_offset_in_bytes()), O1_scratch);
    __ cmp_and_br_short(O1_scratch, (int) iid, Assembler::equal, Assembler::pt, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ STOP("bad Method*::intrinsic_id");
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task:  Find out how big the argument list is.
  Address O4_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ ld_ptr(G5_method, in_bytes(Method::const_offset()), O4_param_size);
    __ load_sized_value(Address(O4_param_size, ConstMethod::size_of_parameters_offset()),
                        O4_param_size,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
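    // argument_address(n, n, -1) addresses parameter slot n-1; in the
    // interpreter's argument layout this is the first declared parameter,
    // i.e. the receiver slot when one is present.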
    O4_first_arg_addr = __ argument_address(O4_param_size, O4_param_size, -1);
  } else {
    DEBUG_ONLY(O4_param_size = noreg);
  }

  Register O0_mh = noreg;
  if (!is_signature_polymorphic_static(iid)) {
    __ ld_ptr(O4_first_arg_addr, O0_mh = O0);
    DEBUG_ONLY(O4_param_size = noreg);
  }

  // O4_first_arg_addr is live!

  if (TraceMethodHandles) {
    if (O0_mh != noreg)
      __ mov(O0_mh, G3_method_handle);  // make stub happy
    trace_method_handle_interpreter_entry(_masm, iid);
  }

  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, O0_mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register O0_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ ld_ptr(O4_first_arg_addr, O0_recv = O0);
      DEBUG_ONLY(O4_param_size = noreg);
    }
    Register G5_member = G5_method;  // MemberName ptr; incoming method ptr is dead now
    __ ld_ptr(__ argument_address(constant(0)), G5_member);
    __ add(Gargs, Interpreter::stackElementSize, Gargs);
    generate_method_handle_dispatch(_masm, iid, O0_recv, G5_member, not_for_compiler_entry);
  }

  return entry_point;
}

void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register temp1 = (for_compiler_entry ? G1_scratch : O1);
  Register temp2 = (for_compiler_entry ? G3_scratch : O2);
  Register temp3 = (for_compiler_entry ? G4_scratch : O3);
  Register temp4 = (for_compiler_entry ? noreg      : O4);
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : O0), "only valid assignment");
    assert_different_registers(temp1, O0, O1, O2, O3, O4, O5);
    assert_different_registers(temp2, O0, O1, O2, O3, O4, O5);
    assert_different_registers(temp3, O0, O1, O2, O3, O4, O5);
    assert_different_registers(temp4, O0, O1, O2, O3, O4, O5);
  } else {
    assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP);  // don't trash lastSP
  }
  if (receiver_reg != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
  if (member_reg   != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, G5_method, temp1, temp2, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(MemberName_klass),
                   temp1, temp2,
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
    Address vmtarget_method( G5_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
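    // member_vmtarget yields a ResolvedMethodName oop; vmtarget_method then
    // dereferences its vmtarget field to reach the raw Method*.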

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(receiver_reg, temp1_recv_klass);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(receiver_reg, temp1_recv_klass);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(member_clazz, temp2_defc, temp3);
        load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
        // If we get here, the type check failed!
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  O5_savedSP - interpreter linkage (if interpreted)
    //  O0..O5 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
      }
      __ load_heap_oop(member_vmtarget, G5_method, temp3);
      __ ld_ptr(vmtarget_method, G5_method);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
      }
      __ load_heap_oop(member_vmtarget, G5_method, temp3);
      __ ld_ptr(vmtarget_method, G5_method);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp2);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ ld_ptr(member_vmindex, temp2_index);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmp_and_br_short(temp2_index, (int) 0, Assembler::greaterEqual, Assembler::pn, L_index_ok);
        __ STOP("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, G5_method);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp2);
      }

      Register temp2_intf = temp2;
      __ load_heap_oop(member_clazz, temp2_intf, temp3);
      load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
      __ verify_klass_ptr(temp2_intf);

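      // For linkToInterface, MemberName.vmindex holds the itable index of the
      // interface method within temp2_intf.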
      Register G5_index = G5_method;
      __ ld_ptr(member_vmindex, G5_index);
      if (VerifyMethodHandles) {
        Label L;
        __ cmp_and_br_short(G5_index, 0, Assembler::greaterEqual, Assembler::pt, L);
        __ STOP("invalid vtable index for MH.invokeInterface");
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp2_intf,
                                 // note: next two args must be the same:
                                 G5_index, G5_method,
                                 temp3, temp4,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
      break;
    }

    // Live at this point:
    //   G5_method
    //   O5_savedSP (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that the receiver register be shifted out.
    __ verify_method_ptr(G5_method);
    jump_from_method_handle(_masm, G5_method, temp1, temp2, for_compiler_entry);

    if (iid == vmIntrinsics::_linkToInterface) {
      __ BIND(L_incompatible_class_change_error);
      AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
      __ jump_to(icce, temp1);
      __ delayed()->nop();
    }
  }
}

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh,
                              intptr_t* saved_sp,
                              intptr_t* args,
                              intptr_t* tracing_fp) {
  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);  // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "G3_mh" : "G3";
  tty->print_cr("MH %s %s=" INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT,
                adaptername, mh_reg_name,
                p2i(mh), p2i(saved_sp), p2i(args));

  if (Verbose) {
    // dumping last frame with frame::describe

    JavaThread* p = JavaThread::active();

    ResourceMark rm;
    PRESERVE_EXCEPTION_MARK; // may not be needed here, but safer and inexpensive
    FrameValues values;

    // Note: We want to allow trace_method_handle from any call site.
    // While trace_method_handle creates a frame, it may be entered
    // without a valid return PC in O7 (e.g. not just after a call).
    // Walking that frame could lead to failures due to that invalid PC.
    // => carefully detect that frame when doing the stack walking

    // walk up to the right frame using the "tracing_fp" argument
    intptr_t* cur_sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
    frame cur_frame(cur_sp, frame::unpatchable, NULL);

    while (cur_frame.fp() != (intptr_t *)(STACK_BIAS+(uintptr_t)tracing_fp)) {
      cur_frame = os::get_sender_for_C_frame(&cur_frame);
    }

    // safely create a frame and call frame::describe
    intptr_t *dump_sp = cur_frame.sender_sp();
    intptr_t *dump_fp = cur_frame.link();

    bool walkable = has_mh; // whether the traced frame should be walkable

    // the sender for cur_frame is the caller of trace_method_handle
    if (walkable) {
      // The previous definition of walkable may have to be refined
      // if new call sites cause the next frame constructor to start
      // failing. Alternatively, frame constructors could be
      // modified to support the current or future non walkable
      // frames (but this is more intrusive and is not considered as
      // part of this RFE, which will instead use a simpler output).
      frame dump_frame = frame(dump_sp,
                               cur_frame.sp(), // younger_sp
                               false); // no adaptation
      dump_frame.describe(values, 1);
    } else {
      // Robust dump for frames which cannot be constructed from sp/younger_sp
      // Add descriptions without building a Java frame to avoid issues
      values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
      values.describe(-1, dump_sp, "sp");
    }

    bool has_args = has_mh; // whether Gargs is meaningful

    // mark args, if seems valid (may not be valid for some adapters)
    if (has_args) {
      if ((args >= dump_sp) && (args < dump_fp)) {
        values.describe(-1, args, "*G4_args");
      }
    }

    // mark saved_sp, if seems valid (may not be valid for some adapters)
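    // 64-bit SPARC keeps SP/FP values biased by STACK_BIAS; unbias saved_sp
    // before comparing against the unbiased dump_sp/dump_fp.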
    intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp);
    const int ARG_LIMIT = 255, SLOP = 45, UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP);
    if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) {
      values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS");
    }

    // Note: the unextended_sp may not be correct
    tty->print_cr("  stack layout:");
    values.print(p);
    if (has_mh && oopDesc::is_oop(mh)) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  // save: Gargs, O5_savedSP
  __ save_frame(16); // need space for saving required FPU state

  __ set((intptr_t) adaptername, O0);
  __ mov(G3_method_handle, O1);
  __ mov(I5_savedSP, O2);
  __ mov(Gargs, O3);
  __ mov(I6, O4); // frame identifier for safe stack walking

  // Save scratched registers that might be needed. Robustness is more
  // important than optimizing the saves for this debug only code.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  __ stf(FloatRegisterImpl::D, Ftos_d, d_save);
  // Safely save all globals but G2 (handled by call_VM_leaf) and G7
  // (OS reserved).
  __ mov(G3_method_handle, L3);
  __ mov(Gargs, L4);
  __ mov(G5_method_type, L5);
  __ mov(G6, L6);
  __ mov(G1, L1);

  __ call_VM_leaf(L2 /* for G2 */, CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ mov(L3, G3_method_handle);
  __ mov(L4, Gargs);
  __ mov(L5, G5_method_type);
  __ mov(L6, G6);
  __ mov(L1, G1);
  __ ldf(FloatRegisterImpl::D, d_save, Ftos_d);

  __ restore();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif // PRODUCT