1 /*
2 * Copyright (c) 2013, Red Hat Inc.
3 * Copyright (c) 1997, 2019, Oracle and/or its affiliates.
4 * All rights reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "interpreter/interpreter.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "oops/markOop.hpp"
31 #include "oops/method.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/methodHandles.hpp"
34 #include "runtime/frame.inline.hpp"
35 #include "runtime/handles.inline.hpp"
36 #include "runtime/javaCalls.hpp"
37 #include "runtime/monitorChunk.hpp"
38 #include "runtime/os.hpp"
39 #include "runtime/signature.hpp"
40 #include "runtime/stubCodeGenerator.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "vmreg_aarch64.inline.hpp"
43 #ifdef COMPILER1
44 #include "c1/c1_Runtime1.hpp"
45 #include "runtime/vframeArray.hpp"
46 #endif
47
#ifdef ASSERT
// Debug-only hook called when a location is stored into a RegisterMap.
// No extra validation is performed on aarch64, so the body is empty.
void RegisterMap::check_location_valid() {
}
#endif
52
53
54 // Profiling/safepoint support
55
// Profiling/safepoint support

// Decide whether it is safe to construct this frame's sender on behalf
// of a thread that may have been sampled asynchronously (e.g. from a
// profiling signal). Every pointer is validated conservatively; any
// doubt returns false rather than risking a crash during unwinding.
bool frame::safe_for_sender(JavaThread *thread) {
  address sp = (address)_sp;
  address fp = (address)_fp;
  address unextended_sp = (address)_unextended_sp;

  // consider stack guards when trying to determine "safe" stack pointers
  static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
  size_t usable_stack_size = thread->stack_size() - stack_guard_size;

  // sp must be within the usable part of the stack (not in guards)
  bool sp_safe = (sp < thread->stack_base()) &&
                 (sp >= thread->stack_base() - usable_stack_size);


  if (!sp_safe) {
    return false;
  }

  // When we are running interpreted code the machine stack pointer, SP, is
  // set low enough so that the Java expression stack can grow and shrink
  // without ever exceeding the machine stack bounds.  So, ESP >= SP.

  // When we call out of an interpreted method, SP is incremented so that
  // the space between SP and ESP is removed.  The SP saved in the callee's
  // frame is the SP *before* this increment.  So, when we walk a stack of
  // interpreter frames the sender's SP saved in a frame might be less than
  // the SP at the point of call.

  // So unextended sp must be within the stack but we need not to check
  // that unextended sp >= sp

  bool unextended_sp_safe = (unextended_sp < thread->stack_base());

  if (!unextended_sp_safe) {
    return false;
  }

  // an fp must be within the stack and above (but not equal) sp
  // second evaluation on fp+ is added to handle situation where fp is -1
  bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base())));

  // We know sp/unextended_sp are safe only fp is questionable here

  // If the current frame is known to the code cache then we can attempt to
  // to construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code

  if (_cb != NULL ) {

    // First check if frame is complete and tester is reliable
    // Unfortunately we can only check frame complete for runtime stubs and nmethod
    // other generic buffer blobs are more problematic so we just assume they are
    // ok. adapter blobs never have a frame complete and are never ok.

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.

      if (!fp_safe) return false;

      // Validate the JavaCallWrapper an entry frame must have

      address jcw = (address)entry_frame_call_wrapper();

      // The wrapper lives in the caller's (C) frame, so it must be
      // above fp but still inside the thread's stack.
      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp);

      return jcw_safe;

    }

    intptr_t* sender_sp = NULL;
    intptr_t* sender_unextended_sp = NULL;
    address   sender_pc = NULL;
    intptr_t* saved_fp = NULL;

    if (is_interpreted_frame()) {
      // fp must be safe
      if (!fp_safe) {
        return false;
      }

      sender_pc = (address) this->fp()[return_addr_offset];
      // for interpreted frames, the value below is the sender "raw" sp,
      // which can be different from the sender unextended sp (the sp seen
      // by the sender) because of current frame local variables
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);
      sender_unextended_sp = (intptr_t*) this->fp()[interpreter_frame_sender_sp_offset];
      saved_fp = (intptr_t*) this->fp()[link_offset];

    } else {
      // must be some sort of compiled/runtime frame
      // fp does not have to be safe (although it could be check for c1?)

      // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
      if (_cb->frame_size() <= 0) {
        return false;
      }

      sender_sp = _unextended_sp + _cb->frame_size();
      sender_unextended_sp = sender_sp;
      // The return address is the word immediately below the sender's sp.
      sender_pc = (address) *(sender_sp-1);
      // Note: frame::sender_sp_offset is only valid for compiled frame
      saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
    }


    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // fp is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved fp
      // is really a frame pointer.

      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);

    }

    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_pc == NULL ||  sender_blob == NULL) {
      return false;
    }

    // Could be a zombie method
    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }

    // Could just be some random pointer within the codeBlob
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp());

      return jcw_safe;
    }

    if (sender_blob->is_nmethod()) {
      nmethod* nm = sender_blob->as_nmethod_or_null();
      if (nm != NULL) {
        // A pc sitting on a deopt entry point is mid-transition and
        // cannot be unwound safely.
        if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) {
          return false;
        }
      }
    }

    // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
    // because the return address counts against the callee's frame.

    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_nmethod(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.

    if (!sender_blob->is_nmethod()) {
      return false;
    }

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be native-compiled frame. Since sender will try and use fp to find
  // linkages it must be safe

  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)

  if ( (address) this->fp()[return_addr_offset] == NULL) return false;


  // could try and do some more potential verification of native frame if we could think of some...

  return true;

}
287
// Overwrite this frame's saved return address (the word immediately
// below sp()) with pc, then resynchronize the cached code blob and the
// deoptimization state with the newly patched pc.
void frame::patch_pc(Thread* thread, address pc) {
  address* pc_addr = &(((address*) sp())[-1]);
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
                  p2i(pc_addr), p2i(*pc_addr), p2i(pc));
  }
  // Either the return address is the original one or we are going to
  // patch in the same address that's already there.
  assert(_pc == *pc_addr || pc == *pc_addr, "must be");
  *pc_addr = pc;
  _cb = CodeCache::find_blob(pc);
  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    // The frame was deoptimized: keep reporting the original pc.
    assert(original_pc == _pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
309
is_interpreted_frame() const310 bool frame::is_interpreted_frame() const {
311 return Interpreter::contains(pc());
312 }
313
frame_size(RegisterMap * map) const314 int frame::frame_size(RegisterMap* map) const {
315 frame sender = this->sender(map);
316 return sender.sp() - sp();
317 }
318
entry_frame_argument_at(int offset) const319 intptr_t* frame::entry_frame_argument_at(int offset) const {
320 // convert offset to index to deal with tsi
321 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
322 // Entry frame's arguments are always in relation to unextended_sp()
323 return &unextended_sp()[index];
324 }
325
326 // sender_sp
327 #ifdef CC_INTERP
interpreter_frame_sender_sp() const328 intptr_t* frame::interpreter_frame_sender_sp() const {
329 assert(is_interpreted_frame(), "interpreted frame expected");
330 // QQQ why does this specialize method exist if frame::sender_sp() does same thing?
331 // seems odd and if we always know interpreted vs. non then sender_sp() is really
332 // doing too much work.
333 return get_interpreterState()->sender_sp();
334 }
335
336 // monitor elements
337
interpreter_frame_monitor_begin() const338 BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
339 return get_interpreterState()->monitor_base();
340 }
341
interpreter_frame_monitor_end() const342 BasicObjectLock* frame::interpreter_frame_monitor_end() const {
343 return (BasicObjectLock*) get_interpreterState()->stack_base();
344 }
345
346 #else // CC_INTERP
347
interpreter_frame_sender_sp() const348 intptr_t* frame::interpreter_frame_sender_sp() const {
349 assert(is_interpreted_frame(), "interpreted frame expected");
350 return (intptr_t*) at(interpreter_frame_sender_sp_offset);
351 }
352
// Store sender_sp into this interpreter frame's saved-sender-sp slot.
void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}
357
358
359 // monitor elements
360
// The monitor block grows downward from a fixed offset below fp; this
// returns its (highest-address) base.
BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}
364
interpreter_frame_monitor_end() const365 BasicObjectLock* frame::interpreter_frame_monitor_end() const {
366 BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
367 // make sure the pointer points inside the frame
368 assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
369 assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer");
370 return result;
371 }
372
interpreter_frame_set_monitor_end(BasicObjectLock * value)373 void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
374 *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
375 }
376
377 // Used by template based interpreter deoptimization
interpreter_frame_set_last_sp(intptr_t * sp)378 void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
379 *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
380 }
381 #endif // CC_INTERP
382
frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  if (!jfa->walkable()) {
    // Capture _last_Java_pc (if needed) and mark anchor walkable.
    jfa->capture_last_Java_pc();
  }
  // Reset the map: the sender's registers come purely from the anchor.
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  assert(jfa->last_Java_pc() != NULL, "not walkable");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
  return fr;
}
402
403 //------------------------------------------------------------------------------
404 // frame::verify_deopt_original_pc
405 //
406 // Verifies the calculated original PC of a deoptimization PC for the
407 // given unextended SP. The unextended SP might also be the saved SP
408 // for MethodHandle call sites.
#ifdef ASSERT
// Debug-only: verify that the original (pre-deopt) PC computed for the
// given unextended SP really lies inside nm, and that its
// MethodHandle-return property matches the caller's expectation.
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
  frame fr;

  // This is ugly but it's better than to change {get,set}_original_pc
  // to take an SP value as argument.  And it's only a debugging
  // method anyway.
  fr._unextended_sp = unextended_sp;

  address original_pc = nm->get_original_pc(&fr);
  assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
  assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
}
#endif
423
424 //------------------------------------------------------------------------------
425 // frame::adjust_unextended_sp
// Correct _unextended_sp when returning to a compiled MethodHandle call
// site or a deopt entry, where the saved fp slot actually holds the
// caller's unextended SP rather than a frame pointer.
void frame::adjust_unextended_sp() {
  // If we are returning to a compiled MethodHandle call site, the
  // saved_fp will in fact be a saved value of the unextended SP.  The
  // simplest way to tell whether we are returning to such a call site
  // is as follows:

  nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
  if (sender_nm != NULL) {
    // If the sender PC is a deoptimization point, get the original
    // PC.  For MethodHandle call site the unextended_sp is stored in
    // saved_fp.
    if (sender_nm->is_deopt_mh_entry(_pc)) {
      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp));
      _unextended_sp = _fp;
    }
    else if (sender_nm->is_deopt_entry(_pc)) {
      // Plain deopt entry: unextended_sp is already correct; just verify.
      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
    }
    else if (sender_nm->is_method_handle_return(_pc)) {
      _unextended_sp = _fp;
    }
  }
}
449
450 //------------------------------------------------------------------------------
451 // frame::update_map_with_saved_link
update_map_with_saved_link(RegisterMap * map,intptr_t ** link_addr)452 void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
453 // The interpreter and compiler(s) always save fp in a known
454 // location on entry. We must record where that location is
455 // so that if fp was live on callout from c2 we can find
456 // the saved copy no matter what it called.
457
458 // Since the interpreter always saves fp if we record where it is then
459 // we don't have to always save fp on entry and exit to c2 compiled
460 // code, on entry will be enough.
461 map->set_location(rfp->as_VMReg(), (address) link_addr);
462 // this is weird "H" ought to be at a higher address however the
463 // oopMaps seems to have the "H" regs at the same address and the
464 // vanilla register.
465 // XXXX make this go away
466 if (true) {
467 map->set_location(rfp->as_VMReg()->next(), (address) link_addr);
468 }
469 }
470
471
472 //------------------------------------------------------------------------------
473 // frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  // SP is the raw SP from the sender after adapter or interpreter
  // extension.
  intptr_t* sender_sp = this->sender_sp();

  // This is the sp before any possible extension (adapter/locals).
  intptr_t* unextended_sp = interpreter_frame_sender_sp();

#ifdef COMPILER2
  // Record where the sender's fp was saved so C2 can recover a live fp.
  if (map->update_map()) {
    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
  }
#endif // COMPILER2

  return frame(sender_sp, unextended_sp, link(), sender_pc());
}
490
491
492 //------------------------------------------------------------------------------
493 // frame::sender_for_compiled_frame
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  // we cannot rely upon the last fp having been saved to the thread
  // in C2 code but it will have been pushed onto the stack. so we
  // have to find it relative to the unextended sp

  assert(_cb->frame_size() >= 0, "must have non-zero frame size");
  intptr_t* l_sender_sp = unextended_sp() + _cb->frame_size();
  intptr_t* unextended_sp = l_sender_sp;

  // the return_address is always the word on the stack
  address sender_pc = (address) *(l_sender_sp-1);

  // Saved fp sits at a fixed offset below the sender's sp.
  intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset);

  // assert (sender_sp() == l_sender_sp, "should be");
  // assert (*saved_fp_addr == link(), "should be");

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
    if (_cb->oop_maps() != NULL) {
      OopMapSet::update_register_map(this, map);
    }

    // Since the prolog does the save and restore of EBP there is no oopmap
    // for it so we must fill in its location as if there was an oopmap entry
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
528
529 //------------------------------------------------------------------------------
530 // frame::sender
// Dispatch to the appropriate sender-construction routine based on the
// kind of the current frame.
frame frame::sender(RegisterMap* map) const {
  // Default is we done have to follow them. The sender_for_xxx will
  // update it accordingly
  map->set_include_argument_oops(false);

  if (is_entry_frame())
    return sender_for_entry_frame(map);
  if (is_interpreted_frame())
    return sender_for_interpreter_frame(map);
  assert(_cb == CodeCache::find_blob(pc()),"Must be the same");

  // This test looks odd: why is it not is_compiled_frame() ? That's
  // because stubs also have OOP maps.
  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}
552
interpreter_frame_equals_unpacked_fp(intptr_t * fp)553 bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
554 assert(is_interpreted_frame(), "must be interpreter frame");
555 Method* method = interpreter_frame_method();
556 // When unpacking an optimized frame the frame pointer is
557 // adjusted with:
558 int diff = (method->max_locals() - method->size_of_parameters()) *
559 Interpreter::stackElementWords;
560 return _fp == (fp - diff);
561 }
562
// Platform-dependent GC epilogue; nothing is required on aarch64.
void frame::pd_gc_epilog() {
  // nothing done here now
}
566
// Conservative validity check of a (potential) interpreter frame, used
// when walking stacks asynchronously. Returns false on any suspicious
// value; a true result means the frame *looks* consistent, not that it
// is proven valid.
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// QQQ
#ifdef CC_INTERP
#else
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  // fp/sp must be non-null and word-aligned.
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements

  // first the method

  Method* m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!m->is_valid_method()) return false;

  // stack frames shouldn't be much larger than max_stack elements
  // this test requires the use of unextended_sp which is the sp as seen by
  // the current frame, and not sp which is the "raw" pc which could point
  // further because of local variables of the callee method inserted after
  // method arguments
  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcx

  intptr_t  bcx    = interpreter_frame_bcx();
  if (m->validate_bci_from_bcx(bcx) < 0) {
    return false;
  }

  // validate constantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (cp == NULL || !cp->is_metaspace_object()) return false;

  // validate locals

  address locals =  (address) *interpreter_frame_locals_addr();

  // locals must lie between fp and the thread's stack base
  if (locals > thread->stack_base() || locals < (address) fp()) return false;

  // We'd have to be pretty unlucky to be mislead at this point

#endif // CC_INTERP
  return true;
}
628
// Fetch the result of an interpreted method for JVMTI: stores an oop
// result through oop_result, a primitive result into value_result, and
// returns the method's result BasicType.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
#ifdef CC_INTERP
  // Needed for JVMTI. The result should always be in the
  // interpreterState object
  interpreterState istate = get_interpreterState();
#endif // CC_INTERP
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr;
  if (method->is_native()) {
    // TODO : ensure AARCH64 does the same as Intel here i.e. push v0 then r0
    // Prior to calling into the runtime to report the method_exit the possible
    // return value is pushed to the native stack. If the result is a jfloat/jdouble
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
    }
  } else {
    // Non-native: result is at the interpreter's top-of-stack.
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
#ifdef CC_INTERP
        obj = istate->_oop_temp;
#else
        // Native methods stash the oop result in the oop_temp slot.
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
#endif // CC_INTERP
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
      value_result->f = *(jfloat*)tos_addr;
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}
690
691
interpreter_frame_tos_at(jint offset) const692 intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
693 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
694 return &interpreter_frame_tos_address()[index];
695 }
696
697 #ifndef PRODUCT
698
// Describe a named fp-relative frame slot for the frame-values dump.
#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

// Platform-dependent part of frame description: label the interpreter
// frame's fixed slots when this is an interpreter frame.
void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdx);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }
}
714 #endif
715
intptr_t *frame::initial_deoptimization_info() {
  // Not used on aarch64, but we must return something.
  return NULL;
}
720
real_fp() const721 intptr_t* frame::real_fp() const {
722 if (_cb != NULL) {
723 // use the frame size if valid
724 int size = _cb->frame_size();
725 if (size > 0) {
726 return unextended_sp() + size;
727 }
728 }
729 // else rely on fp()
730 assert(! is_compiled_frame(), "unknown compiled frame size");
731 return fp();
732 }
733
#undef DESCRIBE_FP_OFFSET

// Debugger-helper variant: print the slot address, its raw contents and
// the slot name for a frame member at a known fp-relative offset.
#define DESCRIBE_FP_OFFSET(name)                                        \
  {                                                                     \
    unsigned long *p = (unsigned long *)fp;                             \
    printf("0x%016lx 0x%016lx %s\n", (unsigned long)(p + frame::name##_offset), \
           p[frame::name##_offset], #name);                             \
  }

// Per-thread state shared by the pf()/npf() debugger helpers below:
// the next frame to print (fp/pc/sp) and a lazily-allocated RegisterMap.
static __thread unsigned long nextfp;
static __thread unsigned long nextpc;
static __thread unsigned long nextsp;
static __thread RegisterMap *reg_map;
747
// Print "method : bci ==> bytecode-name" for the bytecode at bcx in m;
// prints "???" placeholders when bcx does not validate against m.
static void printbc(Method *m, intptr_t bcx) {
  const char *name;
  char buf[16];
  if (m->validate_bci_from_bcx(bcx) < 0
      || !m->contains((address)bcx)) {
    name = "???";
    snprintf(buf, sizeof buf, "(bad)");
  } else {
    int bci = m->bci_from((address)bcx);
    snprintf(buf, sizeof buf, "%d", bci);
    name = Bytecodes::name(m->code_at(bci));
  }
  ResourceMark rm;
  printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name);
}
763
// Debugger helper: dump one frame (fixed fp-relative slots plus a short
// description of the code it is executing) and record the next frame's
// sp/fp/pc in the thread-local nextsp/nextfp/nextpc so npf() can walk on.
void internal_pf(unsigned long sp, unsigned long fp, unsigned long pc, unsigned long bcx) {
  if (! fp)
    return;

  DESCRIBE_FP_OFFSET(return_addr);
  DESCRIBE_FP_OFFSET(link);
  DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
  DESCRIBE_FP_OFFSET(interpreter_frame_method);
  DESCRIBE_FP_OFFSET(interpreter_frame_mdx);
  DESCRIBE_FP_OFFSET(interpreter_frame_cache);
  DESCRIBE_FP_OFFSET(interpreter_frame_locals);
  DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
  DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  unsigned long *p = (unsigned long *)fp;

  // We want to see all frames, native and Java.  For compiled and
  // interpreted frames we have special information that allows us to
  // unwind them; for everything else we assume that the native frame
  // pointer chain is intact.
  frame this_frame((intptr_t*)sp, (intptr_t*)fp, (address)pc);
  if (this_frame.is_compiled_frame() ||
      this_frame.is_interpreted_frame()) {
    frame sender = this_frame.sender(reg_map);
    nextfp = (unsigned long)sender.fp();
    nextpc = (unsigned long)sender.pc();
    nextsp = (unsigned long)sender.unextended_sp();
  } else {
    // Native frame: follow the conventional fp chain.
    nextfp = p[frame::link_offset];
    nextpc = p[frame::return_addr_offset];
    nextsp = (unsigned long)&p[frame::sender_sp_offset];
  }

  // bcx == -1 means "not supplied": read it from the frame slot.
  if (bcx == -1ul)
    bcx = p[frame::interpreter_frame_bcx_offset];

  if (Interpreter::contains((address)pc)) {
    Method* m = (Method*)p[frame::interpreter_frame_method_offset];
    if(m && m->is_method()) {
      printbc(m, bcx);
    } else
      printf("not a Method\n");
  } else {
    CodeBlob *cb = CodeCache::find_blob((address)pc);
    if (cb != NULL) {
      if (cb->is_nmethod()) {
        ResourceMark rm;
        nmethod* nm = (nmethod*)cb;
        printf("nmethod %s\n", nm->method()->name_and_sig_as_C_string());
      } else if (cb->name()) {
        printf("CodeBlob %s\n", cb->name());
      }
    }
  }
}
819
npf()820 extern "C" void npf() {
821 CodeBlob *cb = CodeCache::find_blob((address)nextpc);
822 // C2 does not always chain the frame pointers when it can, instead
823 // preferring to use fixed offsets from SP, so a simple leave() does
824 // not work. Instead, it adds the frame size to SP then pops FP and
825 // LR. We have to do the same thing to get a good call chain.
826 if (cb && cb->frame_size())
827 nextfp = nextsp + wordSize * (cb->frame_size() - 2);
828 internal_pf (nextsp, nextfp, nextpc, -1);
829 }
830
// Debugger entry point: print the frame at sp/fp/pc for the given
// thread, (re)initializing the thread-local RegisterMap used for
// sender lookups. bcx may be -1 to have it read from the frame.
extern "C" void pf(unsigned long sp, unsigned long fp, unsigned long pc,
                   unsigned long bcx, unsigned long thread) {
  if (!reg_map) {
    // Lazily allocate the map on first use; placement-new into C heap.
    reg_map = NEW_C_HEAP_OBJ(RegisterMap, mtNone);
    ::new (reg_map) RegisterMap((JavaThread*)thread, false);
  } else {
    *reg_map = RegisterMap((JavaThread*)thread, false);
  }

  {
    // Recompute fp from the blob's frame size (see comment in npf()).
    CodeBlob *cb = CodeCache::find_blob((address)pc);
    if (cb && cb->frame_size())
      fp = sp + wordSize * (cb->frame_size() - 2);
  }
  internal_pf(sp, fp, pc, bcx);
}
847
848 // support for printing out where we are in a Java method
849 // needs to be passed current fp and bcp register values
850 // prints method name, bc index and bytecode name
pm(unsigned long fp,unsigned long bcx)851 extern "C" void pm(unsigned long fp, unsigned long bcx) {
852 DESCRIBE_FP_OFFSET(interpreter_frame_method);
853 unsigned long *p = (unsigned long *)fp;
854 Method* m = (Method*)p[frame::interpreter_frame_method_offset];
855 printbc(m, bcx);
856 }
857
858 #ifndef PRODUCT
859 // This is a generic constructor which is only used by pns() in debug.cpp.
// Generic debug-only constructor (used by pns() in debug.cpp): build a
// frame directly from raw sp/fp/pc values.
frame::frame(void* sp, void* fp, void* pc) {
  init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
863 #endif
864
make_walkable(JavaThread * thread)865 void JavaFrameAnchor::make_walkable(JavaThread* thread) {
866 // last frame set?
867 if (last_Java_sp() == NULL) return;
868 // already walkable?
869 if (walkable()) return;
870 assert(Thread::current() == (Thread*)thread, "not current thread");
871 assert(last_Java_sp() != NULL, "not called from Java code?");
872 assert(last_Java_pc() == NULL, "already walkable");
873 capture_last_Java_pc();
874 assert(walkable(), "something went wrong");
875 }
876
// Read the return address saved just below the last Java sp and record
// it as the last Java pc, making the anchor walkable.
void JavaFrameAnchor::capture_last_Java_pc() {
  assert(_last_Java_sp != NULL, "no last frame set");
  assert(_last_Java_pc == NULL, "already walkable");
  _last_Java_pc = (address)_last_Java_sp[-1];
}
882