/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_PARSE_HPP
#define SHARE_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public ResourceObj {
  friend class VMStructs;

  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)

  GrowableArray<InlineTree*> _subtrees;

  bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method);

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             int max_inline_level);
  InlineTree* build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool        try_to_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            JVMState* jvms,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result,
                            bool& should_delay);
  bool        should_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result);
  bool        should_not_inline(ciMethod* callee_method,
                                ciMethod* caller_method,
                                JVMState* jvms,
                                WarmCallInfo* wci_result);
  bool        is_not_reached(ciMethod* callee_method,
                             ciMethod* caller_method,
                             int caller_bci,
                             ciCallProfile& profile);
  void        print_inlining(ciMethod* callee_method, int caller_bci,
                             ciMethod* caller_method, bool success) const;

  InlineTree* caller_tree()       const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         inline_level()      const { return stack_depth(); }
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
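
  // Illustration: the root InlineTree (from build_inline_tree_root) has a
  // NULL _caller_jvms, so its stack_depth() is 0.  A callee reached through
  // the chain A() -> B() -> C(), with A as the compiled root, has a caller
  // JVMS two frames deep, so its inline_level() is 2.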
  const char* msg()               const { return _msg; }
  void        set_msg(const char* msg)  { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);
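
  // Usage sketch (illustrative; variable names are not from this header, and
  // the real call site lives in doCall.cpp):
  //
  //   WarmCallInfo scratch_ci;
  //   bool should_delay = false;
  //   WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
  //
  // A hot result is inlined immediately, a cold one is rejected outright,
  // and anything in between may be enqueued as a warm call for a later
  // decision.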

  // Information about inlined method
  JVMState*   caller_jvms()       const { return _caller_jvms; }
  ciMethod   *method()            const { return _method; }
  int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint        count_inline_bcs()  const { return _count_inline_bcs; }

#ifndef PRODUCT
private:
  uint        _count_inlines;     // Count of inlined methods
public:
  // Debug information collected during parse
  uint        count_inlines()     const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool        _forced_inline;     // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool        forced_inline()     const { return _forced_inline; }
  // Count number of nodes in this subtree
  int         count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out);
};


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;     // how many predecessors in CFG?
    int                _preds_parsed;   // how many of these have been parsed?
    uint               _count;          // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;      // has this block been parsed yet?
    bool               _is_handler;     // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have merged backedge?
    SafePointNode*     _start_map;      // all values flowing into this block
    MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap
    bool               _has_predicates; // Were predicates added before parsing of the loop head?

    int                _num_successors; // Includes only normal control flow.
    int                _all_successors; // Include exception paths also.
    Block**            _successors;

   public:

    // Set up the block data structure itself.
    Block(Parse* outer, int rpo);

    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const     { return _flow; }

    int pred_count() const              { return _pred_count; }
    int preds_parsed() const            { return _preds_parsed; }
    bool is_parsed() const              { return _is_parsed; }
    bool is_handler() const             { return _is_handler; }

    void set_count(uint x)              { _count = x; }
    uint count() const                  { return _count; }

    SafePointNode* start_map() const    { assert(is_merged(), "");  return _start_map; }
    void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const              { return _start_map != NULL; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const    { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != NULL && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const               { return preds_parsed() == pred_count(); }

    bool has_predicates() const         { return _has_predicates; }
    void set_has_predicates()           { _has_predicates = true; }

    int num_successors() const          { return _num_successors; }
    int all_successors() const          { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const                   { return flow()->start(); }
    int limit() const                   { return flow()->limit(); }
    int rpo() const                     { return flow()->rpo(); }
    int start_sp() const                { return flow()->stack_size(); }

    bool is_loop_head() const           { return flow()->is_loop_head(); }
    bool is_SEL_head() const            { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const { assert(is_SEL_head(), ""); return is_invariant_local(i); }

    const Type* peek(int off = 0) const { return stack_type_at(start_sp() - (off + 1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const     { return flow()->has_trap() && flow()->trap_bci() == bci; }
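
    // Note: blocks are numbered in reverse post-order, so a predecessor with
    // pred->rpo() >= rpo() can only reach a single-entry loop head along a
    // backedge; that ordering fact is what is_SEL_backedge() checks above.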

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
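
    // Example: a block with pred_count() == 3 hands out path numbers 3, 2, 1
    // as its predecessors get parsed; 1 is PhiNode::Input, since input 0 of
    // a phi is reserved for its controlling region.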

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be NULL.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ResourceObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse*   _parser;
    Compile* _compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse* p, Compile* c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state(Bytecodes::Code bc);
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F); // cutoff in percent
  };

 public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram* bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram()             { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;        // JVMS which carries incoming args & state.
  float         _expected_uses; // expected number of calls to this code
  float         _prof_factor;   // discount applied to my profile counts
  int           _depth;         // Inline tree depth, for debug printouts
  const TypeFunc* _tf;          // My kind of function type
  int           _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;          // Results of previous flow pass.
  Block*        _blocks;        // Array of basic-block structs.
  int           _block_count;   // Number of elements in _blocks.

  GraphKit      _exits;         // Record all normal returns and throws here.
  bool          _wrote_final;   // Did we write a final field?
  bool          _wrote_volatile; // Did we write a volatile field?
  bool          _wrote_stable;  // Did we write a @Stable field?
  bool          _wrote_fields;  // Did we write any field?
  Node*         _alloc_with_final; // An allocation node with final field

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;     // block currently getting parsed
  ciBytecodeStream  _iter;      // stream of this method's bytecodes

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;        // Debugging SwitchRanges.
  int _est_switch_depth;        // Debugging SwitchRanges.
#endif

  bool         _first_return;                  // true if return is the first to be parsed
  bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint         _new_idx;                       // any node with _idx at or above this value was new during this parse; used to trim the replaced-nodes list

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  //            entry_bci()     -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  //            blocks()        -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final() const   { return _wrote_final; }
  void      set_wrote_final(bool z)   { _wrote_final = z; }
  bool          wrote_volatile() const { return _wrote_volatile; }
  void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
  bool          wrote_stable() const  { return _wrote_stable; }
  void      set_wrote_stable(bool z)  { _wrote_stable = z; }
  bool          wrote_fields() const  { return _wrote_fields; }
  void      set_wrote_fields(bool z)  { _wrote_fields = z; }
  Node*         alloc_with_final() const { return _alloc_with_final; }
  void      set_alloc_with_final(Node* n) {
    assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?");
    _alloc_with_final = n;
  }

  Block*        block()         const { return _block; }
  ciBytecodeStream& iter()            { return _iter; }
  Bytecodes::Code bc()          const { return _iter.cur_bc(); }

  void set_block(Block* b)            { _block = b; }

  // Derived accessors:
  bool is_normal_parse() const  { return _entry_bci == InvocationEntryBci; }
  bool is_osr_parse() const     { return _entry_bci != InvocationEntryBci; }
  int  osr_bci() const          { assert(is_osr_parse(), ""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing()                { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, BasicType bt, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode*& bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path(int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode* ensure_phi(int idx, bool nocreate = false);
  PhiNode* ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type*& elemtype);

  void clinit_deopt();

  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (UseLoopSafepoints && target_bci <= bci()) {
      add_safepoint();
    }
  }
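
  // Example: the backward "goto" that closes a while-loop has
  // target_bci <= bci(), so with UseLoopSafepoints enabled every iteration
  // passes a safepoint poll; a forward branch never inserts one here.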

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass);

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }
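
  // The two flags encode the bytecode family: the first (is_get) separates
  // loads from stores, the second (is_field) separates instance accesses,
  // which consume a receiver, from static ones; all four bytecodes funnel
  // into do_field_access() below.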

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float   dynamic_branch_prediction(float& cnt, BoolTest::mask btest, Node* test);
  float   branch_prediction(float& cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool    seems_never_taken(float prob) const;
  bool    path_is_suitable_for_uncommon_trap(float prob) const;
  bool    seems_stable_comparison() const;

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                              Block* path, Block* other_path);
  void    sharpen_type_after_if(BoolTest::mask btest,
                                Node* con, const Type* tcon,
                                Node* val, const Type* tval);
  void    maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  Node*   jump_if_join(Node* iffalse, Node* iftrue);
  void    jump_if_true_fork (IfNode* ifNode, int dest_bci_if_true, bool unc);
  void    jump_if_false_fork(IfNode* ifNode, int dest_bci_if_false, bool unc);
  void    jump_if_always_fork(int dest_bci_if_true, bool unc);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void    linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);
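
  // Lowering sketch: a tableswitch/lookupswitch is first turned into a
  // sorted list of SwitchRanges.  A short list is emitted as a linear
  // sequence of compares, a dense one may become a jump table via
  // create_jump_tables(), and the remainder is handled by recursive binary
  // search in jump_switch_ranges(), whose depth argument tracks recursion.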

  void decrement_age();

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

#endif // SHARE_OPTO_PARSE_HPP