1 /*
2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This code is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 only, as
8  * published by the Free Software Foundation.
9  *
10  * This code is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13  * version 2 for more details (a copy is included in the LICENSE file that
14  * accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License version
17  * 2 along with this work; if not, write to the Free Software Foundation,
18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19  *
20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21  * or visit www.oracle.com if you need additional information or have any
22  * questions.
23  *
24  */
25 
26 #include "precompiled.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/method.hpp"
31 #include "oops/oop.inline.hpp"
32 #include "prims/methodHandles.hpp"
33 #include "runtime/frame.inline.hpp"
34 #include "runtime/handles.inline.hpp"
35 #include "runtime/javaCalls.hpp"
36 #include "runtime/monitorChunk.hpp"
37 #include "runtime/os.hpp"
38 #include "runtime/signature.hpp"
39 #include "runtime/stubCodeGenerator.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "vmreg_aarch64.inline.hpp"
42 #ifdef COMPILER1
43 #include "c1/c1_Runtime1.hpp"
44 #include "runtime/vframeArray.hpp"
45 #endif
46 
#ifdef ASSERT
// Debug-only hook for validating register save locations recorded in the
// map. Intentionally empty on aarch64: no extra per-location checking is done.
void RegisterMap::check_location_valid() {
}
#endif
51 
52 
53 // Profiling/safepoint support
54 
// Heuristically decide whether it is safe to construct and walk to this
// frame's sender (used by profiling/safepoint code that may observe frames
// in arbitrary intermediate states). Returns false on any sign that the
// frame's sp/fp/pc are inconsistent or point outside the usable stack.
bool frame::safe_for_sender(JavaThread *thread) {
  address   sp = (address)_sp;
  address   fp = (address)_fp;
  address   unextended_sp = (address)_unextended_sp;

  // consider stack guards when trying to determine "safe" stack pointers
  // (static: the guard zone sizes are process-wide constants)
  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
    (JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size()) : 0;
  size_t usable_stack_size = thread->stack_size() - stack_guard_size;

  // sp must be within the usable part of the stack (not in guards)
  bool sp_safe = (sp < thread->stack_base()) &&
                 (sp >= thread->stack_base() - usable_stack_size);


  if (!sp_safe) {
    return false;
  }

  // When we are running interpreted code the machine stack pointer, SP, is
  // set low enough so that the Java expression stack can grow and shrink
  // without ever exceeding the machine stack bounds.  So, ESP >= SP.

  // When we call out of an interpreted method, SP is incremented so that
  // the space between SP and ESP is removed.  The SP saved in the callee's
  // frame is the SP *before* this increment.  So, when we walk a stack of
  // interpreter frames the sender's SP saved in a frame might be less than
  // the SP at the point of call.

  // So unextended sp must be within the stack but we need not to check
  // that unextended sp >= sp

  bool unextended_sp_safe = (unextended_sp < thread->stack_base());

  if (!unextended_sp_safe) {
    return false;
  }

  // an fp must be within the stack and above (but not equal) sp
  // second evaluation on fp+ is added to handle situation where fp is -1
  bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base())));

  // We know sp/unextended_sp are safe only fp is questionable here

  // If the current frame is known to the code cache then we can attempt to
  // to construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code

  if (_cb != NULL ) {

    // First check if frame is complete and tester is reliable
    // Unfortunately we can only check frame complete for runtime stubs and nmethod
    // other generic buffer blobs are more problematic so we just assume they are
    // ok. adapter blobs never have a frame complete and are never ok.

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.
      return fp_safe && is_entry_frame_valid(thread);
    }

    // Candidate sender state, filled in differently for interpreted vs
    // compiled/runtime frames below.
    intptr_t* sender_sp = NULL;
    intptr_t* sender_unextended_sp = NULL;
    address   sender_pc = NULL;
    intptr_t* saved_fp =  NULL;

    if (is_interpreted_frame()) {
      // fp must be safe
      if (!fp_safe) {
        return false;
      }

      sender_pc = (address) this->fp()[return_addr_offset];
      // for interpreted frames, the value below is the sender "raw" sp,
      // which can be different from the sender unextended sp (the sp seen
      // by the sender) because of current frame local variables
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);
      sender_unextended_sp = (intptr_t*) this->fp()[interpreter_frame_sender_sp_offset];
      saved_fp = (intptr_t*) this->fp()[link_offset];

    } else {
      // must be some sort of compiled/runtime frame
      // fp does not have to be safe (although it could be check for c1?)

      // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
      if (_cb->frame_size() <= 0) {
        return false;
      }

      sender_sp = _unextended_sp + _cb->frame_size();
      // Is sender_sp safe?
      if ((address)sender_sp >= thread->stack_base()) {
        return false;
      }
      sender_unextended_sp = sender_sp;
      // return address is the word just below the sender's sp
      sender_pc = (address) *(sender_sp-1);
      // Note: frame::sender_sp_offset is only valid for compiled frame
      saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
    }


    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // fp is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved fp
      // is really a frame pointer.

      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);

    }

    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_pc == NULL ||  sender_blob == NULL) {
      return false;
    }

    // Could be a zombie method
    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }

    // Could just be some random pointer within the codeBlob
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)sender.fp());

      return jcw_safe;
    }

    // Reject senders parked at deopt entry points or inside method handle
    // intrinsics: their frame layout cannot be trusted for walking.
    CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
    if (nm != NULL) {
      if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
          nm->method()->is_method_handle_intrinsic()) {
        return false;
      }
    }

    // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
    // because the return address counts against the callee's frame.

    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_compiled(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.

    if (!sender_blob->is_compiled()) {
        return false;
    }

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be native-compiled frame. Since sender will try and use fp to find
  // linkages it must be safe

  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)

  if ( (address) this->fp()[return_addr_offset] == NULL) return false;


  // could try and do some more potential verification of native frame if we could think of some...

  return true;

}
280 
// Overwrite this frame's return-address slot (the word at sp()[-1]) with
// `pc`, then refresh the cached code blob and deoptimization state so the
// frame object stays consistent with the patched stack.
void frame::patch_pc(Thread* thread, address pc) {
  address* pc_addr = &(((address*) sp())[-1]);
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
                  p2i(pc_addr), p2i(*pc_addr), p2i(pc));
  }
  // Either the return address is the original one or we are going to
  // patch in the same address that's already there.
  assert(_pc == *pc_addr || pc == *pc_addr, "must be");
  *pc_addr = pc;
  _cb = CodeCache::find_blob(pc);
  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    // The frame was already deoptimized: keep reporting the pre-deopt pc.
    assert(original_pc == _pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
302 
// A frame is interpreted iff its pc lies inside the interpreter's code.
bool frame::is_interpreted_frame() const  {
  return Interpreter::contains(pc());
}
306 
frame_size(RegisterMap * map) const307 int frame::frame_size(RegisterMap* map) const {
308   frame sender = this->sender(map);
309   return sender.sp() - sp();
310 }
311 
entry_frame_argument_at(int offset) const312 intptr_t* frame::entry_frame_argument_at(int offset) const {
313   // convert offset to index to deal with tsi
314   int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
315   // Entry frame's arguments are always in relation to unextended_sp()
316   return &unextended_sp()[index];
317 }
318 
// sender_sp
// Read the sender's sp that the interpreter saved in this frame
// (the sp as seen by the sender, before any adapter/locals extension).
intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return (intptr_t*) at(interpreter_frame_sender_sp_offset);
}
324 
// Store `sender_sp` into this interpreter frame's saved-sender-sp slot.
void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}
329 
330 
331 // monitor elements
332 
// Bottom (highest address) of this interpreter frame's monitor block.
BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}
336 
// Top (lowest address, i.e. most recently pushed) monitor of this
// interpreter frame's monitor block, read from the frame's top-of-monitors slot.
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
  // make sure the pointer points inside the frame
  assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
  assert((intptr_t*) result < fp(),  "monitor end should be strictly below the frame pointer");
  return result;
}
344 
// Record `value` as the new top of this interpreter frame's monitor block.
void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
}
348 
// Used by template based interpreter deoptimization
// Stores `sp` into the frame's last-sp slot.
void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
    *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
}
353 
// Sender of an entry (call stub) frame: reconstructed from the
// JavaFrameAnchor stored in the frame's JavaCallWrapper, skipping the
// intervening C frames.
frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  if (!jfa->walkable()) {
    // Capture _last_Java_pc (if needed) and mark anchor walkable.
    jfa->capture_last_Java_pc();
  }
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  vmassert(jfa->last_Java_pc() != NULL, "not walkable");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
  return fr;
}
373 
374 //------------------------------------------------------------------------------
375 // frame::verify_deopt_original_pc
376 //
377 // Verifies the calculated original PC of a deoptimization PC for the
378 // given unextended SP.
#ifdef ASSERT
// Verify that the original (pre-deoptimization) PC recorded for `nm` at the
// given unextended SP lies within the compiled method's main code section.
// Debug-only; builds a throwaway frame just to pass the SP to get_original_pc.
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
  frame fr;

  // This is ugly but it's better than to change {get,set}_original_pc
  // to take an SP value as argument.  And it's only a debugging
  // method anyway.
  fr._unextended_sp = unextended_sp;

  address original_pc = nm->get_original_pc(&fr);
  // Fixed duplicated word ("the the") in the assert message.
  assert(nm->insts_contains_inclusive(original_pc),
         "original PC must be in the main code section of the compiled method (or must be immediately following it)");
}
#endif
393 
394 //------------------------------------------------------------------------------
395 // frame::adjust_unextended_sp
// Adjust _unextended_sp when returning to special call sites. On aarch64
// this is a no-op apart from debug verification of deopt PCs.
void frame::adjust_unextended_sp() {
  // On aarch64, sites calling method handle intrinsics and lambda forms are treated
  // as any other call site. Therefore, no special action is needed when we are
  // returning to any of these call sites.

  if (_cb != NULL) {
    CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
    if (sender_cm != NULL) {
      // If the sender PC is a deoptimization point, get the original PC.
      if (sender_cm->is_deopt_entry(_pc) ||
          sender_cm->is_deopt_mh_entry(_pc)) {
        DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
      }
    }
  }
}
412 
413 //------------------------------------------------------------------------------
414 // frame::update_map_with_saved_link
update_map_with_saved_link(RegisterMap * map,intptr_t ** link_addr)415 void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
416   // The interpreter and compiler(s) always save fp in a known
417   // location on entry. We must record where that location is
418   // so that if fp was live on callout from c2 we can find
419   // the saved copy no matter what it called.
420 
421   // Since the interpreter always saves fp if we record where it is then
422   // we don't have to always save fp on entry and exit to c2 compiled
423   // code, on entry will be enough.
424   map->set_location(rfp->as_VMReg(), (address) link_addr);
425   // this is weird "H" ought to be at a higher address however the
426   // oopMaps seems to have the "H" regs at the same address and the
427   // vanilla register.
428   // XXXX make this go away
429   if (true) {
430     map->set_location(rfp->as_VMReg()->next(), (address) link_addr);
431   }
432 }
433 
434 
435 //------------------------------------------------------------------------------
436 // frame::sender_for_interpreter_frame
// Sender of an interpreted frame, built from the saved sender sp,
// unextended sp, link (saved fp) and return pc stored in this frame.
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  // SP is the raw SP from the sender after adapter or interpreter
  // extension.
  intptr_t* sender_sp = this->sender_sp();

  // This is the sp before any possible extension (adapter/locals).
  intptr_t* unextended_sp = interpreter_frame_sender_sp();

#if COMPILER2_OR_JVMCI
  if (map->update_map()) {
    // Record where this frame saved the caller's fp.
    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
  }
#endif // COMPILER2_OR_JVMCI

  return frame(sender_sp, unextended_sp, link(), sender_pc());
}
453 
454 
455 //------------------------------------------------------------------------------
456 // frame::sender_for_compiled_frame
// Sender of a compiled frame. The sender's sp is computed from this blob's
// fixed frame size; the return pc and saved fp are read at fixed offsets
// below that sp.
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  // we cannot rely upon the last fp having been saved to the thread
  // in C2 code but it will have been pushed onto the stack. so we
  // have to find it relative to the unextended sp

  // NOTE(review): the message says "non-zero" but the condition allows 0;
  // left as-is since the mismatch is pre-existing — confirm intent upstream.
  assert(_cb->frame_size() >= 0, "must have non-zero frame size");
  intptr_t* l_sender_sp = unextended_sp() + _cb->frame_size();
  intptr_t* unextended_sp = l_sender_sp;

  // the return_address is always the word on the stack
  address sender_pc = (address) *(l_sender_sp-1);

  intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset);

  // assert (sender_sp() == l_sender_sp, "should be");
  // assert (*saved_fp_addr == link(), "should be");

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
    if (_cb->oop_maps() != NULL) {
      OopMapSet::update_register_map(this, map);
    }

    // Since the prolog does the save and restore of FP there is no
    // oopmap for it so we must fill in its location as if there was
    // an oopmap entry since if our caller was compiled code there
    // could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
492 
493 //------------------------------------------------------------------------------
494 // frame::sender
// Dispatch to the appropriate sender_for_* routine based on this frame's
// kind (entry / interpreted / code-cache / native).
frame frame::sender(RegisterMap* map) const {
  // Default is we done have to follow them. The sender_for_xxx will
  // update it accordingly
   map->set_include_argument_oops(false);

  if (is_entry_frame())
    return sender_for_entry_frame(map);
  if (is_interpreted_frame())
    return sender_for_interpreter_frame(map);
  assert(_cb == CodeCache::find_blob(pc()),"Must be the same");

  // This test looks odd: why is it not is_compiled_frame() ?  That's
  // because stubs also have OOP maps.
  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}
516 
// Sanity-check a candidate interpreted frame: plausible sp/fp alignment and
// ordering, a valid Method*, bounded frame size, valid bcp, cache and locals.
// Used by safe_for_sender() on a frame that has not been proven genuine.
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements

  // first the method

  Method* m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!Method::is_valid_method(m)) return false;

  // stack frames shouldn't be much larger than max_stack elements
  // this test requires the use of unextended_sp which is the sp as seen by
  // the current frame, and not sp which is the "raw" pc which could point
  // further because of local variables of the callee method inserted after
  // method arguments
  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcx

  address  bcp    = interpreter_frame_bcp();
  if (m->validate_bci_from_bcp(bcp) < 0) {
    return false;
  }

  // validate constantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (cp == NULL || !cp->is_metaspace_object()) return false;

  // validate locals
  // (locals live above fp and below the stack base)

  address locals =  (address) *interpreter_frame_locals_addr();

  if (locals > thread->stack_base() || locals < (address) fp()) return false;

  // We'd have to be pretty unlucky to be mislead at this point
  return true;
}
573 
// Read the method result from an interpreted frame's top-of-stack (or, for
// native methods, the slots where the native result was pushed), storing an
// oop result in *oop_result or a primitive in *value_result. Returns the
// method's declared result type.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr;
  if (method->is_native()) {
    // TODO : ensure AARCH64 does the same as Intel here i.e. push v0 then r0
    // Prior to calling into the runtime to report the method_exit the possible
    // return value is pushed to the native stack. If the result is a jfloat/jdouble
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
        // Native oop results are stashed in the frame's oop temp slot.
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
        value_result->f = *(jfloat*)tos_addr;
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}
626 
627 
interpreter_frame_tos_at(jint offset) const628 intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
629   int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
630   return &interpreter_frame_tos_address()[index];
631 }
632 
#ifndef PRODUCT

// Annotate `values` with the symbolic name of the fp-relative frame slot
// `name`, for frame::describe() debug output.
#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

// Platform-specific part of frame description: label every interpreter
// frame slot so stack dumps are readable.
void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
    DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }
}
#endif
652 
// Platform hook used during deoptimization; no data is needed on aarch64.
intptr_t *frame::initial_deoptimization_info() {
  // Not used on aarch64, but we must return something.
  return NULL;
}
657 
real_fp() const658 intptr_t* frame::real_fp() const {
659   if (_cb != NULL) {
660     // use the frame size if valid
661     int size = _cb->frame_size();
662     if (size > 0) {
663       return unextended_sp() + size;
664     }
665   }
666   // else rely on fp()
667   assert(! is_compiled_frame(), "unknown compiled frame size");
668   return fp();
669 }
670 
#undef DESCRIBE_FP_OFFSET

// Debugger-friendly variant of DESCRIBE_FP_OFFSET: prints the address and
// raw contents of the named fp-relative slot directly with printf.
#define DESCRIBE_FP_OFFSET(name)                                        \
  {                                                                     \
    unsigned long *p = (unsigned long *)fp;                             \
    printf("0x%016lx 0x%016lx %s\n", (unsigned long)(p + frame::name##_offset), \
           p[frame::name##_offset], #name);                             \
  }

// Per-thread cursor state used by the pf()/npf() debugger helpers below to
// walk successive frames across separate invocations.
static __thread unsigned long nextfp;
static __thread unsigned long nextpc;
static __thread unsigned long nextsp;
static __thread RegisterMap *reg_map;
684 
// Debugger helper: print a method's name together with the bci and bytecode
// name corresponding to `bcx` (a bcp), or "(bad)"/"???" if bcx is invalid.
static void printbc(Method *m, intptr_t bcx) {
  const char *name;
  char buf[16];
  if (m->validate_bci_from_bcp((address)bcx) < 0
      || !m->contains((address)bcx)) {
    name = "???";
    snprintf(buf, sizeof buf, "(bad)");
  } else {
    int bci = m->bci_from((address)bcx);
    snprintf(buf, sizeof buf, "%d", bci);
    name = Bytecodes::name(m->code_at(bci));
  }
  ResourceMark rm;
  printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name);
}
700 
// Debugger helper: dump the interpreter-frame slots at `fp`, identify the
// code at `pc` (bytecode / nmethod / code blob), and record the sender's
// sp/fp/pc in the thread-local next* cursors so npf() can continue the walk.
void internal_pf(unsigned long sp, unsigned long fp, unsigned long pc, unsigned long bcx) {
  if (! fp)
    return;

  DESCRIBE_FP_OFFSET(return_addr);
  DESCRIBE_FP_OFFSET(link);
  DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_method);
  DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
  DESCRIBE_FP_OFFSET(interpreter_frame_cache);
  DESCRIBE_FP_OFFSET(interpreter_frame_locals);
  DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
  DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  unsigned long *p = (unsigned long *)fp;

  // We want to see all frames, native and Java.  For compiled and
  // interpreted frames we have special information that allows us to
  // unwind them; for everything else we assume that the native frame
  // pointer chain is intact.
  frame this_frame((intptr_t*)sp, (intptr_t*)fp, (address)pc);
  if (this_frame.is_compiled_frame() ||
      this_frame.is_interpreted_frame()) {
    frame sender = this_frame.sender(reg_map);
    nextfp = (unsigned long)sender.fp();
    nextpc = (unsigned long)sender.pc();
    nextsp = (unsigned long)sender.unextended_sp();
  } else {
    // Fall back to following the native frame-pointer chain.
    nextfp = p[frame::link_offset];
    nextpc = p[frame::return_addr_offset];
    nextsp = (unsigned long)&p[frame::sender_sp_offset];
  }

  // bcx == -1 means "read the bcp from the frame itself".
  if (bcx == -1ul)
    bcx = p[frame::interpreter_frame_bcp_offset];

  if (Interpreter::contains((address)pc)) {
    Method* m = (Method*)p[frame::interpreter_frame_method_offset];
    if(m && m->is_method()) {
      printbc(m, bcx);
    } else
      printf("not a Method\n");
  } else {
    CodeBlob *cb = CodeCache::find_blob((address)pc);
    if (cb != NULL) {
      if (cb->is_nmethod()) {
        ResourceMark rm;
        nmethod* nm = (nmethod*)cb;
        printf("nmethod %s\n", nm->method()->name_and_sig_as_C_string());
      } else if (cb->name()) {
        printf("CodeBlob %s\n", cb->name());
      }
    }
  }
}
756 
// Debugger helper: print the *next* frame, continuing from the cursors that
// the previous pf()/npf() call left in nextsp/nextfp/nextpc.
extern "C" void npf() {
  CodeBlob *cb = CodeCache::find_blob((address)nextpc);
  // C2 does not always chain the frame pointers when it can, instead
  // preferring to use fixed offsets from SP, so a simple leave() does
  // not work.  Instead, it adds the frame size to SP then pops FP and
  // LR.  We have to do the same thing to get a good call chain.
  if (cb && cb->frame_size())
    nextfp = nextsp + wordSize * (cb->frame_size() - 2);
  internal_pf (nextsp, nextfp, nextpc, -1);
}
767 
// Debugger entry point: print the frame at (sp, fp, pc) with bytecode
// pointer `bcx`, (re)initializing the thread-local RegisterMap for `thread`.
extern "C" void pf(unsigned long sp, unsigned long fp, unsigned long pc,
                   unsigned long bcx, unsigned long thread) {
  if (!reg_map) {
    // First use on this thread: allocate and placement-construct the map.
    reg_map = NEW_C_HEAP_OBJ(RegisterMap, mtNone);
    ::new (reg_map) RegisterMap((JavaThread*)thread, false);
  } else {
    *reg_map = RegisterMap((JavaThread*)thread, false);
  }

  {
    // As in npf(): recompute fp from the blob's frame size, since compiled
    // code may not maintain the frame-pointer chain.
    CodeBlob *cb = CodeCache::find_blob((address)pc);
    if (cb && cb->frame_size())
      fp = sp + wordSize * (cb->frame_size() - 2);
  }
  internal_pf(sp, fp, pc, bcx);
}
784 
// support for printing out where we are in a Java method
// needs to be passed current fp and bcp register values
// prints method name, bc index and bytecode name
extern "C" void pm(unsigned long fp, unsigned long bcx) {
  DESCRIBE_FP_OFFSET(interpreter_frame_method);
  unsigned long *p = (unsigned long *)fp;
  Method* m = (Method*)p[frame::interpreter_frame_method_offset];
  printbc(m, bcx);
}
794 
#ifndef PRODUCT
// This is a generic constructor which is only used by pns() in debug.cpp.
frame::frame(void* sp, void* fp, void* pc) {
  init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}

// Platform-specific debug hook for print-stack; nothing needed on aarch64.
void frame::pd_ps() {}
#endif
803 
// Make this anchor walkable by capturing the last Java pc if a last Java
// frame has been set. No-op if there is no last frame or it is already walkable.
void JavaFrameAnchor::make_walkable(JavaThread* thread) {
  // last frame set?
  if (last_Java_sp() == NULL) return;
  // already walkable?
  if (walkable()) return;
  vmassert(Thread::current() == (Thread*)thread, "not current thread");
  vmassert(last_Java_sp() != NULL, "not called from Java code?");
  vmassert(last_Java_pc() == NULL, "already walkable");
  capture_last_Java_pc();
  vmassert(walkable(), "something went wrong");
}
815 
// Fill in _last_Java_pc from the return-address word just below
// _last_Java_sp, making the anchor walkable.
void JavaFrameAnchor::capture_last_Java_pc() {
  vmassert(_last_Java_sp != NULL, "no last frame set");
  vmassert(_last_Java_pc == NULL, "already walkable");
  _last_Java_pc = (address)_last_Java_sp[-1];
}
821