1 /*
2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "memory/universe.hpp"
30 #include "oops/markWord.hpp"
31 #include "oops/method.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/methodHandles.hpp"
34 #include "runtime/frame.inline.hpp"
35 #include "runtime/handles.inline.hpp"
36 #include "runtime/javaCalls.hpp"
37 #include "runtime/monitorChunk.hpp"
38 #include "runtime/os.inline.hpp"
39 #include "runtime/signature.hpp"
40 #include "runtime/stubCodeGenerator.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "vmreg_aarch64.inline.hpp"
43 #ifdef COMPILER1
44 #include "c1/c1_Runtime1.hpp"
45 #include "runtime/vframeArray.hpp"
46 #endif
47
48 #ifdef ASSERT
// Debug-only hook; no extra validation of RegisterMap locations is
// performed on aarch64, so the body is intentionally empty.
void RegisterMap::check_location_valid() {
}
51 #endif
52
53
54 // Profiling/safepoint support
55
// Conservatively decide whether it is safe to construct this frame's
// sender on behalf of a thread that may be stopped at an arbitrary point
// (profiling/safepoint). Returns false whenever any frame value looks
// implausible, rather than risking a crash during the walk.
bool frame::safe_for_sender(JavaThread *thread) {
  address sp = (address)_sp;
  address fp = (address)_fp;
  address unextended_sp = (address)_unextended_sp;

  // consider stack guards when trying to determine "safe" stack pointers
  // sp must be within the usable part of the stack (not in guards)
  if (!thread->is_in_usable_stack(sp)) {
    return false;
  }

  // When we are running interpreted code the machine stack pointer, SP, is
  // set low enough so that the Java expression stack can grow and shrink
  // without ever exceeding the machine stack bounds. So, ESP >= SP.

  // When we call out of an interpreted method, SP is incremented so that
  // the space between SP and ESP is removed. The SP saved in the callee's
  // frame is the SP *before* this increment. So, when we walk a stack of
  // interpreter frames the sender's SP saved in a frame might be less than
  // the SP at the point of call.

  // So unextended sp must be within the stack but we need not check
  // that unextended sp >= sp
  if (!thread->is_in_full_stack_checked(unextended_sp)) {
    return false;
  }

  // an fp must be within the stack and above (but not equal) sp.
  // The second check (on the fp + return-address slot) also rejects a
  // bogus fp such as -1, whose slot address wraps outside the stack.
  bool fp_safe = thread->is_in_stack_range_excl(fp, sp) &&
                 thread->is_in_full_stack_checked(fp + (return_addr_offset * sizeof(void*)));

  // We know sp/unextended_sp are safe; only fp is questionable here

  // If the current frame is known to the code cache then we can attempt to
  // construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code

  if (_cb != NULL ) {

    // First check if frame is complete and tester is reliable
    // Unfortunately we can only check frame complete for runtime stubs and nmethod
    // other generic buffer blobs are more problematic so we just assume they are
    // ok. adapter blobs never have a frame complete and are never ok.

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.
      return fp_safe && is_entry_frame_valid(thread);
    }

    intptr_t* sender_sp = NULL;
    intptr_t* sender_unextended_sp = NULL;
    address sender_pc = NULL;
    intptr_t* saved_fp = NULL;

    if (is_interpreted_frame()) {
      // fp must be safe
      if (!fp_safe) {
        return false;
      }

      sender_pc = (address) this->fp()[return_addr_offset];
      // for interpreted frames, the value below is the sender "raw" sp,
      // which can be different from the sender unextended sp (the sp seen
      // by the sender) because of current frame local variables
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);
      sender_unextended_sp = (intptr_t*) this->fp()[interpreter_frame_sender_sp_offset];
      saved_fp = (intptr_t*) this->fp()[link_offset];

    } else {
      // must be some sort of compiled/runtime frame
      // fp does not have to be safe (although it could be checked for c1?)

      // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
      if (_cb->frame_size() <= 0) {
        return false;
      }

      sender_sp = _unextended_sp + _cb->frame_size();
      // Is sender_sp safe?
      if (!thread->is_in_full_stack_checked((address)sender_sp)) {
        return false;
      }
      sender_unextended_sp = sender_sp;
      sender_pc = (address) *(sender_sp-1);
      // Note: frame::sender_sp_offset is only valid for compiled frame
      saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
    }


    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // fp is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved fp
      // is really a frame pointer.

      if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);

    }

    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_pc == NULL || sender_blob == NULL) {
      return false;
    }

    // Could be a zombie method
    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }

    // Could just be some random pointer within the codeBlob
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      return thread->is_in_stack_range_excl(jcw, (address)sender.fp());
    }

    // Do not trust a sender pc that sits at a deopt or method handle
    // intrinsic entry: the frame layout there is not the normal one.
    CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
    if (nm != NULL) {
      if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
          nm->method()->is_method_handle_intrinsic()) {
        return false;
      }
    }

    // If the frame size is 0 (or less) something is bad, because every nmethod
    // has a non-zero frame size: the return address counts against the
    // callee's frame.

    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_compiled(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.

    if (!sender_blob->is_compiled()) {
      return false;
    }

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be native-compiled frame. Since sender will try and use fp to find
  // linkages it must be safe

  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)

  if ( (address) this->fp()[return_addr_offset] == NULL) return false;


  // could try and do some more potential verification of native frame if we could think of some...

  return true;

}
265
// Overwrite the return-address word of this frame (one word below sp())
// with pc, and keep the cached _pc/_deopt_state in sync with what was
// patched.
void frame::patch_pc(Thread* thread, address pc) {
  assert(_cb == CodeCache::find_blob(pc), "unexpected pc");
  address* pc_addr = &(((address*) sp())[-1]);
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
                  p2i(pc_addr), p2i(*pc_addr), p2i(pc));
  }
  // Either the return address is the original one or we are going to
  // patch in the same address that's already there.
  assert(_pc == *pc_addr || pc == *pc_addr, "must be");
  *pc_addr = pc;
  // If the code blob stashed an original pc for this frame, the frame is
  // deoptimized; keep reporting the original pc through _pc.
  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    assert(original_pc == _pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
287
is_interpreted_frame() const288 bool frame::is_interpreted_frame() const {
289 return Interpreter::contains(pc());
290 }
291
frame_size(RegisterMap * map) const292 int frame::frame_size(RegisterMap* map) const {
293 frame sender = this->sender(map);
294 return sender.sp() - sp();
295 }
296
entry_frame_argument_at(int offset) const297 intptr_t* frame::entry_frame_argument_at(int offset) const {
298 // convert offset to index to deal with tsi
299 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
300 // Entry frame's arguments are always in relation to unextended_sp()
301 return &unextended_sp()[index];
302 }
303
304 // sender_sp
interpreter_frame_sender_sp() const305 intptr_t* frame::interpreter_frame_sender_sp() const {
306 assert(is_interpreted_frame(), "interpreted frame expected");
307 return (intptr_t*) at(interpreter_frame_sender_sp_offset);
308 }
309
set_interpreter_frame_sender_sp(intptr_t * sender_sp)310 void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
311 assert(is_interpreted_frame(), "interpreted frame expected");
312 ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
313 }
314
315
316 // monitor elements
317
interpreter_frame_monitor_begin() const318 BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
319 return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
320 }
321
interpreter_frame_monitor_end() const322 BasicObjectLock* frame::interpreter_frame_monitor_end() const {
323 BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
324 // make sure the pointer points inside the frame
325 assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
326 assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer");
327 return result;
328 }
329
interpreter_frame_set_monitor_end(BasicObjectLock * value)330 void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
331 *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
332 }
333
334 // Used by template based interpreter deoptimization
interpreter_frame_set_last_sp(intptr_t * sp)335 void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
336 *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
337 }
338
// The sender of an entry frame is the last Java frame recorded in the
// JavaCallWrapper's anchor: Java called into C here, so we skip the C
// frames and resume at the top frame of the previous Java chunk.
frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  if (!jfa->walkable()) {
    // Capture _last_Java_pc (if needed) and mark anchor walkable.
    jfa->capture_last_Java_pc();
  }
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  vmassert(jfa->last_Java_pc() != NULL, "not walkable");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
  return fr;
}
358
359 //------------------------------------------------------------------------------
360 // frame::verify_deopt_original_pc
361 //
362 // Verifies the calculated original PC of a deoptimization PC for the
363 // given unextended SP.
364 #ifdef ASSERT
verify_deopt_original_pc(CompiledMethod * nm,intptr_t * unextended_sp)365 void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
366 frame fr;
367
368 // This is ugly but it's better than to change {get,set}_original_pc
369 // to take an SP value as argument. And it's only a debugging
370 // method anyway.
371 fr._unextended_sp = unextended_sp;
372
373 address original_pc = nm->get_original_pc(&fr);
374 assert(nm->insts_contains_inclusive(original_pc),
375 "original PC must be in the main code section of the the compiled method (or must be immediately following it)");
376 }
377 #endif
378
379 //------------------------------------------------------------------------------
380 // frame::adjust_unextended_sp
adjust_unextended_sp()381 void frame::adjust_unextended_sp() {
382 // On aarch64, sites calling method handle intrinsics and lambda forms are treated
383 // as any other call site. Therefore, no special action is needed when we are
384 // returning to any of these call sites.
385
386 if (_cb != NULL) {
387 CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
388 if (sender_cm != NULL) {
389 // If the sender PC is a deoptimization point, get the original PC.
390 if (sender_cm->is_deopt_entry(_pc) ||
391 sender_cm->is_deopt_mh_entry(_pc)) {
392 DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
393 }
394 }
395 }
396 }
397
398 //------------------------------------------------------------------------------
399 // frame::update_map_with_saved_link
update_map_with_saved_link(RegisterMap * map,intptr_t ** link_addr)400 void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
401 // The interpreter and compiler(s) always save fp in a known
402 // location on entry. We must record where that location is
403 // so that if fp was live on callout from c2 we can find
404 // the saved copy no matter what it called.
405
406 // Since the interpreter always saves fp if we record where it is then
407 // we don't have to always save fp on entry and exit to c2 compiled
408 // code, on entry will be enough.
409 map->set_location(rfp->as_VMReg(), (address) link_addr);
410 // this is weird "H" ought to be at a higher address however the
411 // oopMaps seems to have the "H" regs at the same address and the
412 // vanilla register.
413 // XXXX make this go away
414 if (true) {
415 map->set_location(rfp->as_VMReg()->next(), (address) link_addr);
416 }
417 }
418
419
420 //------------------------------------------------------------------------------
421 // frame::sender_for_interpreter_frame
// Construct the sender of an interpreted frame from the slots the
// interpreter saved in this frame (raw sender sp, unextended sender sp,
// saved fp/link, return address).
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  // SP is the raw SP from the sender after adapter or interpreter
  // extension.
  intptr_t* sender_sp = this->sender_sp();

  // This is the sp before any possible extension (adapter/locals).
  intptr_t* unextended_sp = interpreter_frame_sender_sp();

#if COMPILER2_OR_JVMCI
  if (map->update_map()) {
    // Record where the caller's fp is saved so a compiled caller's live
    // state in rfp can be located by GC/deopt.
    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
  }
#endif // COMPILER2_OR_JVMCI

  return frame(sender_sp, unextended_sp, link(), sender_pc());
}
438
439
440 //------------------------------------------------------------------------------
441 // frame::sender_for_compiled_frame
sender_for_compiled_frame(RegisterMap * map) const442 frame frame::sender_for_compiled_frame(RegisterMap* map) const {
443 // we cannot rely upon the last fp having been saved to the thread
444 // in C2 code but it will have been pushed onto the stack. so we
445 // have to find it relative to the unextended sp
446
447 assert(_cb->frame_size() >= 0, "must have non-zero frame size");
448 intptr_t* l_sender_sp = unextended_sp() + _cb->frame_size();
449 intptr_t* unextended_sp = l_sender_sp;
450
451 // the return_address is always the word on the stack
452 address sender_pc = (address) *(l_sender_sp-1);
453
454 intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset);
455
456 // assert (sender_sp() == l_sender_sp, "should be");
457 // assert (*saved_fp_addr == link(), "should be");
458
459 if (map->update_map()) {
460 // Tell GC to use argument oopmaps for some runtime stubs that need it.
461 // For C1, the runtime stub might not have oop maps, so set this flag
462 // outside of update_register_map.
463 map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
464 if (_cb->oop_maps() != NULL) {
465 OopMapSet::update_register_map(this, map);
466 }
467
468 // Since the prolog does the save and restore of FP there is no
469 // oopmap for it so we must fill in its location as if there was
470 // an oopmap entry since if our caller was compiled code there
471 // could be live jvm state in it.
472 update_map_with_saved_link(map, saved_fp_addr);
473 }
474
475 return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
476 }
477
478 //------------------------------------------------------------------------------
479 // frame::sender
// Dispatch to the appropriate sender_for_xxx based on what kind of frame
// this is.
frame frame::sender(RegisterMap* map) const {
  // Default is we don't have to follow argument oops. The sender_for_xxx
  // will update it accordingly.
  map->set_include_argument_oops(false);

  if (is_entry_frame())
    return sender_for_entry_frame(map);
  if (is_interpreted_frame())
    return sender_for_interpreter_frame(map);
  assert(_cb == CodeCache::find_blob(pc()),"Must be the same");

  // This test looks odd: why is it not is_compiled_frame() ? That's
  // because stubs also have OOP maps.
  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}
501
// Heuristic validity checks on a potential interpreted frame: fp/sp
// alignment and ordering, a plausible Method*, bcp, constant pool cache
// and locals pointer. Called from safe_for_sender when stack values
// cannot be trusted.
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) { // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements

  // first the method

  Method* m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!Method::is_valid_method(m)) return false;

  // stack frames shouldn't be much larger than max_stack elements
  // this test requires the use of unextended_sp which is the sp as seen by
  // the current frame, and not sp which is the "raw" pc which could point
  // further because of local variables of the callee method inserted after
  // method arguments
  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcx

  address bcp = interpreter_frame_bcp();
  if (m->validate_bci_from_bcp(bcp) < 0) {
    return false;
  }

  // validate constantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (MetaspaceObj::is_valid(cp) == false) return false;

  // validate locals
  // the locals pointer must lie between sp and fp (inclusive)
  address locals = (address) *interpreter_frame_locals_addr();
  return thread->is_in_stack_range_incl(locals, (address)fp());
}
554
// Read the current method's return value out of an interpreted frame:
// an oop result is stored through oop_result, a primitive result into
// value_result. Returns the method's result BasicType.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr;
  if (method->is_native()) {
    // TODO : ensure AARCH64 does the same as Intel here i.e. push v0 then r0
    // Prior to calling into the runtime to report the method_exit the possible
    // return value is pushed to the native stack. If the result is a jfloat/jdouble
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
        // native methods keep the oop result in the frame's oop temp slot
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
      value_result->f = *(jfloat*)tos_addr;
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}
607
608
interpreter_frame_tos_at(jint offset) const609 intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
610 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
611 return &interpreter_frame_tos_address()[index];
612 }
613
614 #ifndef PRODUCT
615
616 #define DESCRIBE_FP_OFFSET(name) \
617 values.describe(frame_no, fp() + frame::name##_offset, #name)
618
// Annotate the fixed fp-relative interpreter-frame slots for the
// frame-values debug printer; nothing platform-specific is added for
// other frame kinds.
void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
    DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }
}
632 #endif
633
initial_deoptimization_info()634 intptr_t *frame::initial_deoptimization_info() {
635 // Not used on aarch64, but we must return something.
636 return NULL;
637 }
638
real_fp() const639 intptr_t* frame::real_fp() const {
640 if (_cb != NULL) {
641 // use the frame size if valid
642 int size = _cb->frame_size();
643 if (size > 0) {
644 return unextended_sp() + size;
645 }
646 }
647 // else rely on fp()
648 assert(! is_compiled_frame(), "unknown compiled frame size");
649 return fp();
650 }
651
652 #undef DESCRIBE_FP_OFFSET
653
654 #define DESCRIBE_FP_OFFSET(name) \
655 { \
656 unsigned long *p = (unsigned long *)fp; \
657 printf("0x%016lx 0x%016lx %s\n", (unsigned long)(p + frame::name##_offset), \
658 p[frame::name##_offset], #name); \
659 }
660
// State handed from pf()/internal_pf() to npf() so the stack can be
// walked one frame per debugger call; thread-local so each thread keeps
// its own walk position.
static __thread unsigned long nextfp;
static __thread unsigned long nextpc;
static __thread unsigned long nextsp;
static __thread RegisterMap *reg_map;
665
printbc(Method * m,intptr_t bcx)666 static void printbc(Method *m, intptr_t bcx) {
667 const char *name;
668 char buf[16];
669 if (m->validate_bci_from_bcp((address)bcx) < 0
670 || !m->contains((address)bcx)) {
671 name = "???";
672 snprintf(buf, sizeof buf, "(bad)");
673 } else {
674 int bci = m->bci_from((address)bcx);
675 snprintf(buf, sizeof buf, "%d", bci);
676 name = Bytecodes::name(m->code_at(bci));
677 }
678 ResourceMark rm;
679 printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name);
680 }
681
// Debugger helper: dump the fp-relative slots of the frame at (sp, fp,
// pc), stash the sender's fp/pc/sp in the thread-local next* variables so
// npf() can continue the walk, and print what the pc resolves to
// (interpreted method, nmethod, or other code blob).
void internal_pf(unsigned long sp, unsigned long fp, unsigned long pc, unsigned long bcx) {
  if (! fp)
    return;

  DESCRIBE_FP_OFFSET(return_addr);
  DESCRIBE_FP_OFFSET(link);
  DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_method);
  DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
  DESCRIBE_FP_OFFSET(interpreter_frame_cache);
  DESCRIBE_FP_OFFSET(interpreter_frame_locals);
  DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
  DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  unsigned long *p = (unsigned long *)fp;

  // We want to see all frames, native and Java. For compiled and
  // interpreted frames we have special information that allows us to
  // unwind them; for everything else we assume that the native frame
  // pointer chain is intact.
  frame this_frame((intptr_t*)sp, (intptr_t*)fp, (address)pc);
  if (this_frame.is_compiled_frame() ||
      this_frame.is_interpreted_frame()) {
    frame sender = this_frame.sender(reg_map);
    nextfp = (unsigned long)sender.fp();
    nextpc = (unsigned long)sender.pc();
    nextsp = (unsigned long)sender.unextended_sp();
  } else {
    nextfp = p[frame::link_offset];
    nextpc = p[frame::return_addr_offset];
    nextsp = (unsigned long)&p[frame::sender_sp_offset];
  }

  // bcx == -1 means "unknown": pick the bcp out of the frame slot instead.
  if (bcx == -1ul)
    bcx = p[frame::interpreter_frame_bcp_offset];

  if (Interpreter::contains((address)pc)) {
    Method* m = (Method*)p[frame::interpreter_frame_method_offset];
    if(m && m->is_method()) {
      printbc(m, bcx);
    } else
      printf("not a Method\n");
  } else {
    CodeBlob *cb = CodeCache::find_blob((address)pc);
    if (cb != NULL) {
      if (cb->is_nmethod()) {
        ResourceMark rm;
        nmethod* nm = (nmethod*)cb;
        printf("nmethod %s\n", nm->method()->name_and_sig_as_C_string());
      } else if (cb->name()) {
        printf("CodeBlob %s\n", cb->name());
      }
    }
  }
}
737
// Debugger helper: print the next ("sender") frame, using the state
// stashed in the thread-local next* variables by a previous pf()/npf().
extern "C" void npf() {
  CodeBlob *cb = CodeCache::find_blob((address)nextpc);
  // C2 does not always chain the frame pointers when it can, instead
  // preferring to use fixed offsets from SP, so a simple leave() does
  // not work. Instead, it adds the frame size to SP then pops FP and
  // LR. We have to do the same thing to get a good call chain.
  if (cb && cb->frame_size())
    nextfp = nextsp + wordSize * (cb->frame_size() - 2);
  internal_pf (nextsp, nextfp, nextpc, -1);
}
748
// Debugger entry point: print the frame at (sp, fp, pc) for the given
// thread. Lazily allocates (or resets) the thread-local RegisterMap that
// internal_pf() uses for unwinding.
extern "C" void pf(unsigned long sp, unsigned long fp, unsigned long pc,
                   unsigned long bcx, unsigned long thread) {
  if (!reg_map) {
    // First use on this thread: C-heap allocate, then placement-new.
    reg_map = NEW_C_HEAP_OBJ(RegisterMap, mtNone);
    ::new (reg_map) RegisterMap((JavaThread*)thread, false);
  } else {
    *reg_map = RegisterMap((JavaThread*)thread, false);
  }

  {
    // As in npf(): C2 may not chain fp, so recompute it from sp and the
    // code blob's frame size when one is available.
    CodeBlob *cb = CodeCache::find_blob((address)pc);
    if (cb && cb->frame_size())
      fp = sp + wordSize * (cb->frame_size() - 2);
  }
  internal_pf(sp, fp, pc, bcx);
}
765
766 // support for printing out where we are in a Java method
767 // needs to be passed current fp and bcp register values
768 // prints method name, bc index and bytecode name
pm(unsigned long fp,unsigned long bcx)769 extern "C" void pm(unsigned long fp, unsigned long bcx) {
770 DESCRIBE_FP_OFFSET(interpreter_frame_method);
771 unsigned long *p = (unsigned long *)fp;
772 Method* m = (Method*)p[frame::interpreter_frame_method_offset];
773 printbc(m, bcx);
774 }
775
776 #ifndef PRODUCT
777 // This is a generic constructor which is only used by pns() in debug.cpp.
frame::frame(void* sp, void* fp, void* pc) {
  // Delegate to the common init; this ctor exists only for pns().
  init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
781
// Platform-dependent frame-printing hook; nothing extra to do on aarch64.
void frame::pd_ps() {}
783 #endif
784
// Make this anchor usable by the stack walker: capture the missing
// last_Java_pc from the stack if a last frame is set and the anchor is
// not already walkable.
void JavaFrameAnchor::make_walkable(JavaThread* thread) {
  // last frame set?
  if (last_Java_sp() == NULL) return;
  // already walkable?
  if (walkable()) return;
  vmassert(Thread::current() == (Thread*)thread, "not current thread");
  vmassert(last_Java_sp() != NULL, "not called from Java code?");
  vmassert(last_Java_pc() == NULL, "already walkable");
  capture_last_Java_pc();
  vmassert(walkable(), "something went wrong");
}
796
void JavaFrameAnchor::capture_last_Java_pc() {
  vmassert(_last_Java_sp != NULL, "no last frame set");
  vmassert(_last_Java_pc == NULL, "already walkable");
  // The return address sits one word below the recorded last_Java_sp.
  _last_Java_pc = (address)_last_Java_sp[-1];
}
802