1 /*
2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This code is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 only, as
8  * published by the Free Software Foundation.
9  *
10  * This code is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13  * version 2 for more details (a copy is included in the LICENSE file that
14  * accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License version
17  * 2 along with this work; if not, write to the Free Software Foundation,
18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19  *
20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21  * or visit www.oracle.com if you need additional information or have any
22  * questions.
23  *
24  */
25 
26 #include "precompiled.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "memory/universe.hpp"
30 #include "oops/markOop.hpp"
31 #include "oops/method.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/methodHandles.hpp"
34 #include "runtime/frame.inline.hpp"
35 #include "runtime/handles.inline.hpp"
36 #include "runtime/javaCalls.hpp"
37 #include "runtime/monitorChunk.hpp"
38 #include "runtime/os.inline.hpp"
39 #include "runtime/signature.hpp"
40 #include "runtime/stubCodeGenerator.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "vmreg_aarch64.inline.hpp"
43 #ifdef COMPILER1
44 #include "c1/c1_Runtime1.hpp"
45 #include "runtime/vframeArray.hpp"
46 #endif
47 
#ifdef ASSERT
// Debug-only hook: aarch64 needs no extra per-location validation for
// RegisterMap entries, so this is intentionally a no-op.
void RegisterMap::check_location_valid() {
}
#endif
52 
53 
54 // Profiling/safepoint support
55 
// Conservatively decide whether it is safe to construct this frame's
// sender. Called from profiling/safepoint code that may observe the
// thread at an arbitrary instruction, so every pointer is validated
// before being dereferenced and any doubt returns false.
bool frame::safe_for_sender(JavaThread *thread) {
  address   sp = (address)_sp;
  address   fp = (address)_fp;
  address   unextended_sp = (address)_unextended_sp;

  // consider stack guards when trying to determine "safe" stack pointers
  // (static: guard zone sizes are constant for the life of the VM)
  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
    (JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size()) : 0;
  size_t usable_stack_size = thread->stack_size() - stack_guard_size;

  // sp must be within the usable part of the stack (not in guards)
  bool sp_safe = (sp < thread->stack_base()) &&
                 (sp >= thread->stack_base() - usable_stack_size);


  if (!sp_safe) {
    return false;
  }

  // When we are running interpreted code the machine stack pointer, SP, is
  // set low enough so that the Java expression stack can grow and shrink
  // without ever exceeding the machine stack bounds.  So, ESP >= SP.

  // When we call out of an interpreted method, SP is incremented so that
  // the space between SP and ESP is removed.  The SP saved in the callee's
  // frame is the SP *before* this increment.  So, when we walk a stack of
  // interpreter frames the sender's SP saved in a frame might be less than
  // the SP at the point of call.

  // So unextended sp must be within the stack but we need not to check
  // that unextended sp >= sp

  bool unextended_sp_safe = (unextended_sp < thread->stack_base());

  if (!unextended_sp_safe) {
    return false;
  }

  // an fp must be within the stack and above (but not equal) sp
  // second evaluation on fp+ is added to handle situation where fp is -1
  bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base())));

  // We know sp/unextended_sp are safe only fp is questionable here

  // If the current frame is known to the code cache then we can attempt to
  // to construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code

  if (_cb != NULL ) {

    // First check if frame is complete and tester is reliable
    // Unfortunately we can only check frame complete for runtime stubs and nmethod
    // other generic buffer blobs are more problematic so we just assume they are
    // ok. adapter blobs never have a frame complete and are never ok.

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.
      return fp_safe && is_entry_frame_valid(thread);
    }

    intptr_t* sender_sp = NULL;
    intptr_t* sender_unextended_sp = NULL;
    address   sender_pc = NULL;
    intptr_t* saved_fp =  NULL;

    if (is_interpreted_frame()) {
      // fp must be safe
      if (!fp_safe) {
        return false;
      }

      sender_pc = (address) this->fp()[return_addr_offset];
      // for interpreted frames, the value below is the sender "raw" sp,
      // which can be different from the sender unextended sp (the sp seen
      // by the sender) because of current frame local variables
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);
      sender_unextended_sp = (intptr_t*) this->fp()[interpreter_frame_sender_sp_offset];
      saved_fp = (intptr_t*) this->fp()[link_offset];

    } else {
      // must be some sort of compiled/runtime frame
      // fp does not have to be safe (although it could be check for c1?)

      // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
      if (_cb->frame_size() <= 0) {
        return false;
      }

      sender_sp = _unextended_sp + _cb->frame_size();
      // Is sender_sp safe?
      if ((address)sender_sp >= thread->stack_base()) {
        return false;
      }
      sender_unextended_sp = sender_sp;
      // The return address is the word immediately below the sender sp.
      sender_pc = (address) *(sender_sp-1);
      // Note: frame::sender_sp_offset is only valid for compiled frame
      saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
    }


    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // fp is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved fp
      // is really a frame pointer.

      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);

    }

    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_pc == NULL ||  sender_blob == NULL) {
      return false;
    }

    // Could be a zombie method
    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }

    // Could just be some random pointer within the codeBlob
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)sender.fp());

      return jcw_safe;
    }

    CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
    if (nm != NULL) {
      // Deopt entries and method handle intrinsic frames cannot be
      // walked by this generic code — treat them as unsafe.
      if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
          nm->method()->is_method_handle_intrinsic()) {
        return false;
      }
    }

    // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
    // because the return address counts against the callee's frame.

    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_compiled(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.

    if (!sender_blob->is_compiled()) {
        return false;
    }

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be native-compiled frame. Since sender will try and use fp to find
  // linkages it must be safe

  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)

  if ( (address) this->fp()[return_addr_offset] == NULL) return false;


  // could try and do some more potential verification of native frame if we could think of some...

  return true;

}
281 
// Overwrite this frame's return address (stored in the word just below
// sp) with `pc`, keeping the cached code blob and deopt state
// consistent with the new pc.
void frame::patch_pc(Thread* thread, address pc) {
  address* pc_addr = &(((address*) sp())[-1]);
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
                  p2i(pc_addr), p2i(*pc_addr), p2i(pc));
  }
  // Either the return address is the original one or we are going to
  // patch in the same address that's already there.
  assert(_pc == *pc_addr || pc == *pc_addr, "must be");
  *pc_addr = pc;
  _cb = CodeCache::find_blob(pc);
  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    // Frame has been deoptimized: continue reporting the original pc.
    assert(original_pc == _pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
303 
is_interpreted_frame() const304 bool frame::is_interpreted_frame() const  {
305   return Interpreter::contains(pc());
306 }
307 
frame_size(RegisterMap * map) const308 int frame::frame_size(RegisterMap* map) const {
309   frame sender = this->sender(map);
310   return sender.sp() - sp();
311 }
312 
entry_frame_argument_at(int offset) const313 intptr_t* frame::entry_frame_argument_at(int offset) const {
314   // convert offset to index to deal with tsi
315   int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
316   // Entry frame's arguments are always in relation to unextended_sp()
317   return &unextended_sp()[index];
318 }
319 
320 // sender_sp
interpreter_frame_sender_sp() const321 intptr_t* frame::interpreter_frame_sender_sp() const {
322   assert(is_interpreted_frame(), "interpreted frame expected");
323   return (intptr_t*) at(interpreter_frame_sender_sp_offset);
324 }
325 
set_interpreter_frame_sender_sp(intptr_t * sender_sp)326 void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
327   assert(is_interpreted_frame(), "interpreted frame expected");
328   ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
329 }
330 
331 
332 // monitor elements
333 
interpreter_frame_monitor_begin() const334 BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
335   return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
336 }
337 
interpreter_frame_monitor_end() const338 BasicObjectLock* frame::interpreter_frame_monitor_end() const {
339   BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
340   // make sure the pointer points inside the frame
341   assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
342   assert((intptr_t*) result < fp(),  "monitor end should be strictly below the frame pointer");
343   return result;
344 }
345 
interpreter_frame_set_monitor_end(BasicObjectLock * value)346 void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
347   *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
348 }
349 
350 // Used by template based interpreter deoptimization
interpreter_frame_set_last_sp(intptr_t * sp)351 void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
352     *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
353 }
354 
// Compute the sender of an entry (call-stub) frame: skip over all the
// intervening C frames using the JavaFrameAnchor stored in the
// JavaCallWrapper and return the previous Java activation.
frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  if (!jfa->walkable()) {
    // Capture _last_Java_pc (if needed) and mark anchor walkable.
    jfa->capture_last_Java_pc();
  }
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  vmassert(jfa->last_Java_pc() != NULL, "not walkable");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
  return fr;
}
374 
375 //------------------------------------------------------------------------------
376 // frame::verify_deopt_original_pc
377 //
378 // Verifies the calculated original PC of a deoptimization PC for the
379 // given unextended SP.
#ifdef ASSERT
// Verify the calculated original PC of a deoptimization PC for the
// given unextended SP. Debug-only: builds a minimal frame carrying
// just the unextended SP, which is all get_original_pc() needs.
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
  frame fr;

  // This is ugly but it's better than to change {get,set}_original_pc
  // to take an SP value as argument.  And it's only a debugging
  // method anyway.
  fr._unextended_sp = unextended_sp;

  address original_pc = nm->get_original_pc(&fr);
  // Fixed duplicated word ("the the") in the assertion message.
  assert(nm->insts_contains_inclusive(original_pc),
         "original PC must be in the main code section of the compiled method (or must be immediately following it)");
}
#endif
394 
395 //------------------------------------------------------------------------------
396 // frame::adjust_unextended_sp
// On aarch64 no adjustment of the unextended sp is ever required; this
// only performs a debug-build verification of the deopt original PC.
void frame::adjust_unextended_sp() {
  // On aarch64, sites calling method handle intrinsics and lambda forms are treated
  // as any other call site. Therefore, no special action is needed when we are
  // returning to any of these call sites.

  if (_cb != NULL) {
    CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
    if (sender_cm != NULL) {
      // If the sender PC is a deoptimization point, get the original PC.
      if (sender_cm->is_deopt_entry(_pc) ||
          sender_cm->is_deopt_mh_entry(_pc)) {
        DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
      }
    }
  }
}
413 
414 //------------------------------------------------------------------------------
415 // frame::update_map_with_saved_link
update_map_with_saved_link(RegisterMap * map,intptr_t ** link_addr)416 void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
417   // The interpreter and compiler(s) always save fp in a known
418   // location on entry. We must record where that location is
419   // so that if fp was live on callout from c2 we can find
420   // the saved copy no matter what it called.
421 
422   // Since the interpreter always saves fp if we record where it is then
423   // we don't have to always save fp on entry and exit to c2 compiled
424   // code, on entry will be enough.
425   map->set_location(rfp->as_VMReg(), (address) link_addr);
426   // this is weird "H" ought to be at a higher address however the
427   // oopMaps seems to have the "H" regs at the same address and the
428   // vanilla register.
429   // XXXX make this go away
430   if (true) {
431     map->set_location(rfp->as_VMReg()->next(), (address) link_addr);
432   }
433 }
434 
435 
436 //------------------------------------------------------------------------------
437 // frame::sender_for_interpreter_frame
// Compute the sender of an interpreter frame from the values the
// interpreter saved at fixed fp-relative slots.
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  // SP is the raw SP from the sender after adapter or interpreter
  // extension.
  intptr_t* sender_sp = this->sender_sp();

  // This is the sp before any possible extension (adapter/locals).
  intptr_t* unextended_sp = interpreter_frame_sender_sp();

#if COMPILER2_OR_JVMCI
  if (map->update_map()) {
    // Record where the caller's fp was saved so GC can locate it if fp
    // was live in a compiled caller.
    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
  }
#endif // COMPILER2_OR_JVMCI

  return frame(sender_sp, unextended_sp, link(), sender_pc());
}
454 
455 
456 //------------------------------------------------------------------------------
457 // frame::sender_for_compiled_frame
// Compute the sender of a compiled (or stub) frame. The sender's sp,
// pc and saved fp are all located relative to the unextended sp using
// the code blob's fixed frame size.
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  // we cannot rely upon the last fp having been saved to the thread
  // in C2 code but it will have been pushed onto the stack. so we
  // have to find it relative to the unextended sp

  assert(_cb->frame_size() >= 0, "must have non-zero frame size");
  intptr_t* l_sender_sp = unextended_sp() + _cb->frame_size();
  intptr_t* unextended_sp = l_sender_sp;

  // the return_address is always the word on the stack
  address sender_pc = (address) *(l_sender_sp-1);

  intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset);

  // assert (sender_sp() == l_sender_sp, "should be");
  // assert (*saved_fp_addr == link(), "should be");

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
    if (_cb->oop_maps() != NULL) {
      OopMapSet::update_register_map(this, map);
    }

    // Since the prolog does the save and restore of FP there is no
    // oopmap for it so we must fill in its location as if there was
    // an oopmap entry since if our caller was compiled code there
    // could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
493 
494 //------------------------------------------------------------------------------
495 // frame::sender
sender(RegisterMap * map) const496 frame frame::sender(RegisterMap* map) const {
497   // Default is we done have to follow them. The sender_for_xxx will
498   // update it accordingly
499    map->set_include_argument_oops(false);
500 
501   if (is_entry_frame())
502     return sender_for_entry_frame(map);
503   if (is_interpreted_frame())
504     return sender_for_interpreter_frame(map);
505   assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
506 
507   // This test looks odd: why is it not is_compiled_frame() ?  That's
508   // because stubs also have OOP maps.
509   if (_cb != NULL) {
510     return sender_for_compiled_frame(map);
511   }
512 
513   // Must be native-compiled frame, i.e. the marshaling code for native
514   // methods that exists in the core system.
515   return frame(sender_sp(), link(), sender_pc());
516 }
517 
// Sanity-check a (potential) interpreter frame: pointer alignment and
// ordering, then plausibility of the stored method, bcp, constant
// pool cache and locals. Used when validating a speculative sender.
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements

  // first the method

  Method* m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!Method::is_valid_method(m)) return false;

  // stack frames shouldn't be much larger than max_stack elements
  // this test requires the use of unextended_sp which is the sp as seen by
  // the current frame, and not sp which is the "raw" pc which could point
  // further because of local variables of the callee method inserted after
  // method arguments
  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcx

  address  bcp    = interpreter_frame_bcp();
  if (m->validate_bci_from_bcp(bcp) < 0) {
    return false;
  }

  // validate constantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (MetaspaceObj::is_valid(cp) == false) return false;

  // validate locals

  address locals =  (address) *interpreter_frame_locals_addr();

  // locals live above fp (toward the stack base) on aarch64
  if (locals > thread->stack_base() || locals < (address) fp()) return false;

  // We'd have to be pretty unlucky to be mislead at this point
  return true;
}
574 
// Fetch the method result from an interpreted frame at method exit.
// Stores the value into *oop_result or *value_result according to the
// method's declared return type, which is also returned.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr;
  if (method->is_native()) {
    // TODO : ensure AARCH64 does the same as Intel here i.e. push v0 then r0
    // Prior to calling into the runtime to report the method_exit the possible
    // return value is pushed to the native stack. If the result is a jfloat/jdouble
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
        // Native methods keep the result oop in the frame's oop temp slot.
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
        value_result->f = *(jfloat*)tos_addr;
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}
627 
628 
interpreter_frame_tos_at(jint offset) const629 intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
630   int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
631   return &interpreter_frame_tos_address()[index];
632 }
633 
634 #ifndef PRODUCT
635 
// Describe a named fp-relative interpreter-frame slot to the
// FrameValues collector.
#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

// Platform-dependent part of frame description: annotate the fixed
// interpreter-frame slots when this is an interpreted frame.
void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
    DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }
}
652 #endif
653 
// Platform hook used during deoptimization.
intptr_t *frame::initial_deoptimization_info() {
  // Not used on aarch64, but we must return something.
  return NULL;
}
658 
real_fp() const659 intptr_t* frame::real_fp() const {
660   if (_cb != NULL) {
661     // use the frame size if valid
662     int size = _cb->frame_size();
663     if (size > 0) {
664       return unextended_sp() + size;
665     }
666   }
667   // else rely on fp()
668   assert(! is_compiled_frame(), "unknown compiled frame size");
669   return fp();
670 }
671 
#undef DESCRIBE_FP_OFFSET

// Debugger-helper variant of DESCRIBE_FP_OFFSET: prints the address
// and raw contents of a named fp-relative slot. Expects a local `fp`
// (an unsigned long) to be in scope at the expansion site.
#define DESCRIBE_FP_OFFSET(name)                                        \
  {                                                                     \
    unsigned long *p = (unsigned long *)fp;                             \
    printf("0x%016lx 0x%016lx %s\n", (unsigned long)(p + frame::name##_offset), \
           p[frame::name##_offset], #name);                             \
  }

// Per-thread unwind state shared by the pf()/npf() debugger helpers
// below: the sender frame discovered by the most recent call.
static __thread unsigned long nextfp;
static __thread unsigned long nextpc;
static __thread unsigned long nextsp;
static __thread RegisterMap *reg_map;
685 
printbc(Method * m,intptr_t bcx)686 static void printbc(Method *m, intptr_t bcx) {
687   const char *name;
688   char buf[16];
689   if (m->validate_bci_from_bcp((address)bcx) < 0
690       || !m->contains((address)bcx)) {
691     name = "???";
692     snprintf(buf, sizeof buf, "(bad)");
693   } else {
694     int bci = m->bci_from((address)bcx);
695     snprintf(buf, sizeof buf, "%d", bci);
696     name = Bytecodes::name(m->code_at(bci));
697   }
698   ResourceMark rm;
699   printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name);
700 }
701 
// Debugger helper: dump the fp-relative slots of the frame at
// (sp, fp, pc), print what is executing there (bytecode, nmethod or
// blob name), and record the sender's sp/fp/pc in the per-thread
// next* variables so npf() can continue the walk.
void internal_pf(unsigned long sp, unsigned long fp, unsigned long pc, unsigned long bcx) {
  if (! fp)
    return;

  DESCRIBE_FP_OFFSET(return_addr);
  DESCRIBE_FP_OFFSET(link);
  DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_method);
  DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
  DESCRIBE_FP_OFFSET(interpreter_frame_cache);
  DESCRIBE_FP_OFFSET(interpreter_frame_locals);
  DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
  DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  unsigned long *p = (unsigned long *)fp;

  // We want to see all frames, native and Java.  For compiled and
  // interpreted frames we have special information that allows us to
  // unwind them; for everything else we assume that the native frame
  // pointer chain is intact.
  frame this_frame((intptr_t*)sp, (intptr_t*)fp, (address)pc);
  if (this_frame.is_compiled_frame() ||
      this_frame.is_interpreted_frame()) {
    frame sender = this_frame.sender(reg_map);
    nextfp = (unsigned long)sender.fp();
    nextpc = (unsigned long)sender.pc();
    nextsp = (unsigned long)sender.unextended_sp();
  } else {
    // Native frame: follow the conventional fp chain.
    nextfp = p[frame::link_offset];
    nextpc = p[frame::return_addr_offset];
    nextsp = (unsigned long)&p[frame::sender_sp_offset];
  }

  // -1 means "no bcx supplied": fall back to the slot in the frame.
  if (bcx == -1ul)
    bcx = p[frame::interpreter_frame_bcp_offset];

  if (Interpreter::contains((address)pc)) {
    Method* m = (Method*)p[frame::interpreter_frame_method_offset];
    if(m && m->is_method()) {
      printbc(m, bcx);
    } else
      printf("not a Method\n");
  } else {
    CodeBlob *cb = CodeCache::find_blob((address)pc);
    if (cb != NULL) {
      if (cb->is_nmethod()) {
        ResourceMark rm;
        nmethod* nm = (nmethod*)cb;
        printf("nmethod %s\n", nm->method()->name_and_sig_as_C_string());
      } else if (cb->name()) {
        printf("CodeBlob %s\n", cb->name());
      }
    }
  }
}
757 
// Debugger helper: print the next frame using the state internal_pf()
// left in the per-thread next* variables.
extern "C" void npf() {
  CodeBlob *cb = CodeCache::find_blob((address)nextpc);
  // C2 does not always chain the frame pointers when it can, instead
  // preferring to use fixed offsets from SP, so a simple leave() does
  // not work.  Instead, it adds the frame size to SP then pops FP and
  // LR.  We have to do the same thing to get a good call chain.
  if (cb && cb->frame_size())
    nextfp = nextsp + wordSize * (cb->frame_size() - 2);
  internal_pf (nextsp, nextfp, nextpc, -1);
}
768 
// Debugger entry point: print the frame at (sp, fp, pc) for the given
// thread, initializing the per-thread RegisterMap on first use.
extern "C" void pf(unsigned long sp, unsigned long fp, unsigned long pc,
                   unsigned long bcx, unsigned long thread) {
  if (!reg_map) {
    // Lazily allocate the map, then placement-construct it in place.
    reg_map = NEW_C_HEAP_OBJ(RegisterMap, mtNone);
    ::new (reg_map) RegisterMap((JavaThread*)thread, false);
  } else {
    *reg_map = RegisterMap((JavaThread*)thread, false);
  }

  {
    // See npf(): derive fp from the blob's frame size when available,
    // since C2 may not maintain the fp chain.
    CodeBlob *cb = CodeCache::find_blob((address)pc);
    if (cb && cb->frame_size())
      fp = sp + wordSize * (cb->frame_size() - 2);
  }
  internal_pf(sp, fp, pc, bcx);
}
785 
786 // support for printing out where we are in a Java method
787 // needs to be passed current fp and bcp register values
788 // prints method name, bc index and bytecode name
extern "C" void pm(unsigned long fp, unsigned long bcx) {
  DESCRIBE_FP_OFFSET(interpreter_frame_method);
  unsigned long *p = (unsigned long *)fp;
  // NOTE(review): unlike internal_pf(), the Method* read from the frame
  // is not validated with is_method() before use — assumes the caller
  // passes the fp of a live interpreter frame.
  Method* m = (Method*)p[frame::interpreter_frame_method_offset];
  printbc(m, bcx);
}
795 
796 #ifndef PRODUCT
797 // This is a generic constructor which is only used by pns() in debug.cpp.
// Generic constructor taking untyped pointers; used only by pns() in
// debug.cpp.
frame::frame(void* sp, void* fp, void* pc) {
  init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
801 
pd_ps()802 void frame::pd_ps() {}
803 #endif
804 
// Ensure this anchor is walkable: derive the missing last_Java_pc from
// the stack if a last frame is set but the pc has not been captured.
void JavaFrameAnchor::make_walkable(JavaThread* thread) {
  // last frame set?
  if (last_Java_sp() == NULL) return;
  // already walkable?
  if (walkable()) return;
  vmassert(Thread::current() == (Thread*)thread, "not current thread");
  vmassert(last_Java_sp() != NULL, "not called from Java code?");
  vmassert(last_Java_pc() == NULL, "already walkable");
  capture_last_Java_pc();
  vmassert(walkable(), "something went wrong");
}
816 
// Recover the last Java pc from the stack: it is the word immediately
// below the saved last_Java_sp.
void JavaFrameAnchor::capture_last_Java_pc() {
  vmassert(_last_Java_sp != NULL, "no last frame set");
  vmassert(_last_Java_pc == NULL, "already walkable");
  _last_Java_pc = (address)_last_Java_sp[-1];
}
822