1 /*
2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #include "precompiled.hpp"
26 #include "libadt/vectset.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "opto/block.hpp"
29 #include "opto/c2compiler.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/cfgnode.hpp"
32 #include "opto/machnode.hpp"
33 #include "opto/opcodes.hpp"
34 #include "opto/phaseX.hpp"
35 #include "opto/rootnode.hpp"
36 #include "opto/runtime.hpp"
37 #include "runtime/deoptimization.hpp"
38 #if defined AD_MD_HPP
39 # include AD_MD_HPP
40 #elif defined TARGET_ARCH_MODEL_x86_32
41 # include "adfiles/ad_x86_32.hpp"
42 #elif defined TARGET_ARCH_MODEL_x86_64
43 # include "adfiles/ad_x86_64.hpp"
44 #elif defined TARGET_ARCH_MODEL_aarch64
45 # include "adfiles/ad_aarch64.hpp"
46 #elif defined TARGET_ARCH_MODEL_sparc
47 # include "adfiles/ad_sparc.hpp"
48 #elif defined TARGET_ARCH_MODEL_zero
49 # include "adfiles/ad_zero.hpp"
50 #elif defined TARGET_ARCH_MODEL_ppc_64
51 # include "adfiles/ad_ppc_64.hpp"
52 #endif
53 
54 
55 // Portions of code courtesy of Clifford Click
56 
57 // Optimization - Graph Style
58 
59 // To avoid float value underflow
60 #define MIN_BLOCK_FREQUENCY 1.e-35f
61 
62 //----------------------------schedule_node_into_block-------------------------
63 // Insert node n into block b. Look for projections of n and make sure they
64 // are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
66   // Set basic block of n, Add n to b,
67   map_node_to_block(n, b);
68   b->add_inst(n);
69 
70   // After Matching, nearly any old Node may have projections trailing it.
71   // These are usually machine-dependent flags.  In any case, they might
72   // float to another block below this one.  Move them up.
73   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
74     Node*  use  = n->fast_out(i);
75     if (use->is_Proj()) {
76       Block* buse = get_block_for_node(use);
77       if (buse != b) {              // In wrong block?
78         if (buse != NULL) {
79           buse->find_remove(use);   // Remove from wrong block
80         }
81         map_node_to_block(use, b);
82         b->add_inst(use);
83       }
84     }
85   }
86 }
87 
88 //----------------------------replace_block_proj_ctrl-------------------------
89 // Nodes that have is_block_proj() nodes as their control need to use
90 // the appropriate Region for their actual block as their control since
91 // the projection will be in a predecessor block.
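// E.g. control coming from an IfTrue/IfFalse style projection lives at the
// end of the predecessor block; the dependent node's control is therefore
// rewired below to the head (normally a Region) of the successor block it
// actually belongs to.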
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
93   const Node *in0 = n->in(0);
94   assert(in0 != NULL, "Only control-dependent");
95   const Node *p = in0->is_block_proj();
96   if (p != NULL && p != n) {    // Control from a block projection?
97     assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
98     // Find trailing Region
99     Block *pb = get_block_for_node(in0); // Block-projection already has basic block
100     uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
102       // Search for successor
103       uint max = pb->number_of_nodes();
104       assert( max > 1, "" );
105       uint start = max - pb->_num_succs;
106       // Find which output path belongs to projection
107       for (j = start; j < max; j++) {
108         if( pb->get_node(j) == in0 )
109           break;
110       }
111       assert( j < max, "must find" );
112       // Change control to match head of successor basic block
113       j -= start;
114     }
115     n->set_req(0, pb->_succs[j]->head());
116   }
117 }
118 
119 
120 //------------------------------schedule_pinned_nodes--------------------------
121 // Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
123   // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
124   GrowableArray <Node *> spstack(C->live_nodes() + 8);
125   spstack.push(_root);
126   while (spstack.is_nonempty()) {
127     Node* node = spstack.pop();
128     if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
129       if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
130         assert(node->in(0), "pinned Node must have Control");
131         // Before setting block replace block_proj control edge
132         replace_block_proj_ctrl(node);
133         Node* input = node->in(0);
134         while (!input->is_block_start()) {
135           input = input->in(0);
136         }
137         Block* block = get_block_for_node(input); // Basic block of controlling input
138         schedule_node_into_block(node, block);
139       }
140 
      // Process all inputs that are non-NULL
142       for (int i = node->req() - 1; i >= 0; --i) {
143         if (node->in(i) != NULL) {
144           spstack.push(node->in(i));
145         }
146       }
147     }
148   }
149 }
150 
151 #ifdef ASSERT
152 // Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed before b2.
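// Illustrative example: if n already has an input in block b1 and we now see
// an input in a deeper block b2, walking b2 up the _idom chain must reach b1;
// otherwise the input blocks are not linearly ordered in the dominator tree
// and no unique deepest block exists.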
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
156   if (b1 == NULL)  return;
157   assert(b1->_dom_depth < b2->_dom_depth, "sanity");
158   Block* tmp = b2;
159   while (tmp != b1 && tmp != NULL) {
160     tmp = tmp->_idom;
161   }
162   if (tmp != b1) {
163     // Detected an unschedulable graph.  Print some nice stuff and die.
164     tty->print_cr("!!! Unschedulable graph !!!");
165     for (uint j=0; j<n->len(); j++) { // For all inputs
166       Node* inn = n->in(j); // Get input
167       if (inn == NULL)  continue;  // Ignore NULL, missing inputs
168       Block* inb = cfg->get_block_for_node(inn);
169       tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
170                  inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
171       inn->dump();
172     }
173     tty->print("Failing node: ");
174     n->dump();
    assert(false, "unschedulable graph");
176   }
177 }
178 #endif
179 
static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
181   // Find the last input dominated by all other inputs.
182   Block* deepb           = NULL;        // Deepest block so far
183   int    deepb_dom_depth = 0;
184   for (uint k = 0; k < n->len(); k++) { // For all inputs
185     Node* inn = n->in(k);               // Get input
186     if (inn == NULL)  continue;         // Ignore NULL, missing inputs
187     Block* inb = cfg->get_block_for_node(inn);
188     assert(inb != NULL, "must already have scheduled this input");
189     if (deepb_dom_depth < (int) inb->_dom_depth) {
190       // The new inb must be dominated by the previous deepb.
191       // The various inputs must be linearly ordered in the dom
192       // tree, or else there will not be a unique deepest block.
193       DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
194       deepb = inb;                      // Save deepest block
195       deepb_dom_depth = deepb->_dom_depth;
196     }
197   }
198   assert(deepb != NULL, "must be at least one input to n");
199   return deepb;
200 }
201 
202 
203 //------------------------------schedule_early---------------------------------
204 // Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last
// block in which all their inputs occur.
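// Conceptually, the earliest legal block for an unpinned node is the deepest
// block (by dominator-tree depth) among the blocks of its inputs; this is
// what find_deepest_input() computes.  The traversal below is an iterative
// DFS over use->def edges so that deep graphs do not overflow the C stack.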
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
208   // Allocate stack with enough space to avoid frequent realloc
209   Node_Stack nstack(roots.Size() + 8);
210   // _root will be processed among C->top() inputs
211   roots.push(C->top());
212   visited.set(C->top()->_idx);
213 
214   while (roots.size() != 0) {
215     // Use local variables nstack_top_n & nstack_top_i to cache values
216     // on stack's top.
217     Node* parent_node = roots.pop();
218     uint  input_index = 0;
219 
220     while (true) {
221       if (input_index == 0) {
222         // Fixup some control.  Constants without control get attached
223         // to root and nodes that use is_block_proj() nodes should be attached
224         // to the region that starts their block.
225         const Node* control_input = parent_node->in(0);
226         if (control_input != NULL) {
227           replace_block_proj_ctrl(parent_node);
228         } else {
229           // Is a constant with NO inputs?
230           if (parent_node->req() == 1) {
231             parent_node->set_req(0, _root);
232           }
233         }
234       }
235 
236       // First, visit all inputs and force them to get a block.  If an
237       // input is already in a block we quit following inputs (to avoid
238       // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).
240 
241       // Assume all n's inputs will be processed
242       bool done = true;
243 
244       while (input_index < parent_node->len()) {
245         Node* in = parent_node->in(input_index++);
246         if (in == NULL) {
247           continue;
248         }
249 
250         int is_visited = visited.test_set(in->_idx);
251         if (!has_block(in)) {
252           if (is_visited) {
253             assert(false, "graph should be schedulable");
254             return false;
255           }
256           // Save parent node and next input's index.
257           nstack.push(parent_node, input_index);
258           // Process current input now.
259           parent_node = in;
260           input_index = 0;
261           // Not all n's inputs processed.
262           done = false;
263           break;
264         } else if (!is_visited) {
265           // Visit this guy later, using worklist
266           roots.push(in);
267         }
268       }
269 
270       if (done) {
271         // All of n's inputs have been processed, complete post-processing.
272 
273         // Some instructions are pinned into a block.  These include Region,
274         // Phi, Start, Return, and other control-dependent instructions and
275         // any projections which depend on them.
276         if (!parent_node->pinned()) {
277           // Set earliest legal block.
278           Block* earliest_block = find_deepest_input(parent_node, this);
279           map_node_to_block(parent_node, earliest_block);
280         } else {
281           assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
282         }
283 
284         if (nstack.is_empty()) {
285           // Finished all nodes on stack.
286           // Process next node on the worklist 'roots'.
287           break;
288         }
289         // Get saved parent node and next input's index.
290         parent_node = nstack.node();
291         input_index = nstack.index();
292         nstack.pop();
293       }
294     }
295   }
296   return true;
297 }
298 
299 //------------------------------dom_lca----------------------------------------
300 // Find least common ancestor in dominator tree
301 // LCA is a current notion of LCA, to be raised above 'this'.
302 // As a convenient boundary condition, return 'this' if LCA is NULL.
303 // Find the LCA of those two nodes.
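// Classic depth-based LCA walk: e.g. if 'this' sits at dom depth 5 and LCA
// at depth 3, 'this' is first walked up two _idom links, then both blocks
// are walked up in lockstep until they meet.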
Block* Block::dom_lca(Block* LCA) {
305   if (LCA == NULL || LCA == this)  return this;
306 
307   Block* anc = this;
308   while (anc->_dom_depth > LCA->_dom_depth)
309     anc = anc->_idom;           // Walk up till anc is as high as LCA
310 
311   while (LCA->_dom_depth > anc->_dom_depth)
312     LCA = LCA->_idom;           // Walk up till LCA is as high as anc
313 
314   while (LCA != anc) {          // Walk both up till they are the same
315     LCA = LCA->_idom;
316     anc = anc->_idom;
317   }
318 
319   return LCA;
320 }
321 
322 //--------------------------raise_LCA_above_use--------------------------------
323 // We are placing a definition, and have been given a def->use edge.
324 // The definition must dominate the use, so move the LCA upward in the
325 // dominator tree to dominate the use.  If the use is a phi, adjust
326 // the LCA only with the phi input paths which actually use this def.
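// For example, if the use is a Phi in block B3 that merges this def only on
// the path coming from predecessor B1, the LCA needs to be raised to cover
// B1 but not B3: the def must dominate the predecessor feeding the matching
// Phi input, not the Phi's own block.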
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
328   Block* buse = cfg->get_block_for_node(use);
329   if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
330   if (!use->is_Phi())  return buse->dom_lca(LCA);
331   uint pmax = use->req();       // Number of Phi inputs
  // Why doesn't this loop just break after finding the matching input to
333   // the Phi?  Well...it's like this.  I do not have true def-use/use-def
334   // chains.  Means I cannot distinguish, from the def-use direction, which
335   // of many use-defs lead from the same use to the same def.  That is, this
336   // Phi might have several uses of the same def.  Each use appears in a
337   // different predecessor block.  But when I enter here, I cannot distinguish
338   // which use-def edge I should find the predecessor block for.  So I find
339   // them all.  Means I do a little extra work if a Phi uses the same value
340   // more than once.
341   for (uint j=1; j<pmax; j++) { // For all inputs
342     if (use->in(j) == def) {    // Found matching input?
343       Block* pred = cfg->get_block_for_node(buse->pred(j));
344       LCA = pred->dom_lca(LCA);
345     }
346   }
347   return LCA;
348 }
349 
350 //----------------------------raise_LCA_above_marks----------------------------
351 // Return a new LCA that dominates LCA and any of its marked predecessors.
352 // Search all my parents up to 'early' (exclusive), looking for predecessors
353 // which are marked with the given index.  Return the LCA (in the dom tree)
354 // of all marked blocks.  If there are none marked, return the original
355 // LCA.
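// Roughly: walk predecessors upward from LCA toward 'early'; whenever a block
// carrying the mark is found, fold it into the LCA via dom_lca and resume the
// walk from that (possibly higher) block.  The raise_LCA_visited mark keeps
// the search from re-scanning blocks for this particular load.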
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
357   Block_List worklist;
358   worklist.push(LCA);
359   while (worklist.size() > 0) {
360     Block* mid = worklist.pop();
361     if (mid == early)  continue;  // stop searching here
362 
363     // Test and set the visited bit.
364     if (mid->raise_LCA_visited() == mark)  continue;  // already visited
365 
366     // Don't process the current LCA, otherwise the search may terminate early
367     if (mid != LCA && mid->raise_LCA_mark() == mark) {
368       // Raise the LCA.
369       LCA = mid->dom_lca(LCA);
370       if (LCA == early)  break;   // stop searching everywhere
371       assert(early->dominates(LCA), "early is high enough");
372       // Resume searching at that point, skipping intermediate levels.
373       worklist.push(LCA);
374       if (LCA == mid)
375         continue; // Don't mark as visited to avoid early termination.
376     } else {
377       // Keep searching through this block's predecessors.
378       for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
379         Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
380         worklist.push(mid_parent);
381       }
382     }
383     mid->set_raise_LCA_visited(mark);
384   }
385   return LCA;
386 }
387 
388 //--------------------------memory_early_block--------------------------------
389 // This is a variation of find_deepest_input, the heart of schedule_early.
390 // Find the "early" block for a load, if we considered only memory and
391 // address inputs, that is, if other data inputs were ignored.
392 //
393 // Because a subset of edges are considered, the resulting block will
394 // be earlier (at a shallower dom_depth) than the true schedule_early
395 // point of the node. We compute this earlier block as a more permissive
396 // site for anti-dependency insertion, but only if subsume_loads is enabled.
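// Illustrative example: for a matched load of the form
//   Load(ctrl, memory, base, index)
// only the memory, base and index inputs (plus control) are considered here;
// if the instruction has additional data inputs, the block computed from this
// subset can sit higher in the dominator tree than the node's schedule_early
// block, widening the region searched for anti-dependences.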
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
398   Node* base;
399   Node* index;
400   Node* store = load->in(MemNode::Memory);
401   load->as_Mach()->memory_inputs(base, index);
402 
403   assert(base != NodeSentinel && index != NodeSentinel,
404          "unexpected base/index inputs");
405 
406   Node* mem_inputs[4];
407   int mem_inputs_length = 0;
408   if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
409   if (index != NULL) mem_inputs[mem_inputs_length++] = index;
410   if (store != NULL) mem_inputs[mem_inputs_length++] = store;
411 
  // In the comparison below, add one to account for the control input,
413   // which may be null, but always takes up a spot in the in array.
414   if (mem_inputs_length + 1 < (int) load->req()) {
415     // This "load" has more inputs than just the memory, base and index inputs.
416     // For purposes of checking anti-dependences, we need to start
417     // from the early block of only the address portion of the instruction,
418     // and ignore other blocks that may have factored into the wider
419     // schedule_early calculation.
420     if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);
421 
422     Block* deepb           = NULL;        // Deepest block so far
423     int    deepb_dom_depth = 0;
424     for (int i = 0; i < mem_inputs_length; i++) {
425       Block* inb = cfg->get_block_for_node(mem_inputs[i]);
426       if (deepb_dom_depth < (int) inb->_dom_depth) {
427         // The new inb must be dominated by the previous deepb.
428         // The various inputs must be linearly ordered in the dom
429         // tree, or else there will not be a unique deepest block.
430         DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
431         deepb = inb;                      // Save deepest block
432         deepb_dom_depth = deepb->_dom_depth;
433       }
434     }
435     early = deepb;
436   }
437 
438   return early;
439 }
440 
441 //--------------------------insert_anti_dependences---------------------------
442 // A load may need to witness memory that nearby stores can overwrite.
443 // For each nearby store, either insert an "anti-dependence" edge
444 // from the load to the store, or else move LCA upward to force the
445 // load to (eventually) be scheduled in a block above the store.
446 //
447 // Do not add edges to stores on distinct control-flow paths;
448 // only add edges to stores which might interfere.
449 //
450 // Return the (updated) LCA.  There will not be any possibly interfering
451 // store between the load's "early block" and the updated LCA.
452 // Any stores in the updated LCA will have new precedence edges
453 // back to the load.  The caller is expected to schedule the load
454 // in the LCA, in which case the precedence edges will make LCM
455 // preserve anti-dependences.  The caller may also hoist the load
456 // above the LCA, if it is not the early block.
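// Small example: if a store to the same alias class sits in the load's own
// 'early' block, a precedence edge (store->add_prec(load)) is inserted so
// local scheduling emits the load before the store; if the store sits in
// some intermediate block instead, the LCA is raised above that block so the
// load cannot sink past it.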
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
458   assert(load->needs_anti_dependence_check(), "must be a load of some sort");
459   assert(LCA != NULL, "");
460   DEBUG_ONLY(Block* LCA_orig = LCA);
461 
462   // Compute the alias index.  Loads and stores with different alias indices
463   // do not need anti-dependence edges.
464   uint load_alias_idx = C->get_alias_index(load->adr_type());
465 #ifdef ASSERT
466   if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
467       (PrintOpto || VerifyAliases ||
468        PrintMiscellaneous && (WizardMode || Verbose))) {
469     // Load nodes should not consume all of memory.
470     // Reporting a bottom type indicates a bug in adlc.
471     // If some particular type of node validly consumes all of memory,
472     // sharpen the preceding "if" to exclude it, so we can catch bugs here.
473     tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
474     load->dump(2);
475     if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
476   }
477 #endif
478   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is the only known 'load' that does not conflict with any stores");
480   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
481          "String equals is a 'load' that does not conflict with any stores");
482   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
483          "String indexOf is a 'load' that does not conflict with any stores");
484   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");
486 
487   if (!C->alias_type(load_alias_idx)->is_rewritable()) {
488     // It is impossible to spoil this load by putting stores before it,
489     // because we know that the stores will never update the value
490     // which 'load' must witness.
491     return LCA;
492   }
493 
494   node_idx_t load_index = load->_idx;
495 
  // Note the earliest legal placement of 'load', as determined by
  // the unique point in the dom tree where all memory effects
498   // and other inputs are first available.  (Computed by schedule_early.)
499   // For normal loads, 'early' is the shallowest place (dom graph wise)
500   // to look for anti-deps between this load and any store.
501   Block* early = get_block_for_node(load);
502 
503   // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different from the
505   // schedule_early block in that it could be at an even shallower depth in the
506   // dominator tree, and allow for a broader discovery of anti-dependences.
507   if (C->subsume_loads()) {
508     early = memory_early_block(load, early, this);
509   }
510 
511   ResourceArea *area = Thread::current()->resource_area();
512   Node_List worklist_mem(area);     // prior memory state to store
513   Node_List worklist_store(area);   // possible-def to explore
514   Node_List worklist_visited(area); // visited mergemem nodes
515   Node_List non_early_stores(area); // all relevant stores outside of early
516   bool must_raise_LCA = false;
517 
518 #ifdef TRACK_PHI_INPUTS
519   // %%% This extra checking fails because MergeMem nodes are not GVNed.
520   // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
522   // prevent the load from sinking.  For such a block, set_raise_LCA_mark
523   // may be overly conservative.
524   // Mechanism: count inputs seen for each Phi encountered in worklist_store.
525   DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
526 #endif
527 
528   // 'load' uses some memory state; look for users of the same state.
529   // Recurse through MergeMem nodes to the stores that use them.
530 
531   // Each of these stores is a possible definition of memory
532   // that 'load' needs to use.  We need to force 'load'
533   // to occur before each such store.  When the store is in
534   // the same block as 'load', we insert an anti-dependence
535   // edge load->store.
536 
537   // The relevant stores "nearby" the load consist of a tree rooted
538   // at initial_mem, with internal nodes of type MergeMem.
539   // Therefore, the branches visited by the worklist are of this form:
540   //    initial_mem -> (MergeMem ->)* store
541   // The anti-dependence constraints apply only to the fringe of this tree.
542 
543   Node* initial_mem = load->in(MemNode::Memory);
544   worklist_store.push(initial_mem);
545   worklist_visited.push(initial_mem);
546   worklist_mem.push(NULL);
547   while (worklist_store.size() > 0) {
548     // Examine a nearby store to see if it might interfere with our load.
549     Node* mem   = worklist_mem.pop();
550     Node* store = worklist_store.pop();
551     uint op = store->Opcode();
552 
553     // MergeMems do not directly have anti-deps.
554     // Treat them as internal nodes in a forward tree of memory states,
555     // the leaves of which are each a 'possible-def'.
556     if (store == initial_mem    // root (exclusive) of tree we are searching
557         || op == Op_MergeMem    // internal node of tree we are searching
558         ) {
559       mem = store;   // It's not a possibly interfering store.
560       if (store == initial_mem)
561         initial_mem = NULL;  // only process initial memory once
562 
563       for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
564         store = mem->fast_out(i);
565         if (store->is_MergeMem()) {
566           // Be sure we don't get into combinatorial problems.
567           // (Allow phis to be repeated; they can merge two relevant states.)
568           uint j = worklist_visited.size();
569           for (; j > 0; j--) {
570             if (worklist_visited.at(j-1) == store)  break;
571           }
572           if (j > 0)  continue; // already on work list; do not repeat
573           worklist_visited.push(store);
574         }
575         worklist_mem.push(mem);
576         worklist_store.push(store);
577       }
578       continue;
579     }
580 
581     if (op == Op_MachProj || op == Op_Catch)   continue;
582     if (store->needs_anti_dependence_check())  continue;  // not really a store
583 
584     // Compute the alias index.  Loads and stores with different alias
585     // indices do not need anti-dependence edges.  Wide MemBar's are
586     // anti-dependent on everything (except immutable memories).
587     const TypePtr* adr_type = store->adr_type();
588     if (!C->can_alias(adr_type, load_alias_idx))  continue;
589 
590     // Most slow-path runtime calls do NOT modify Java memory, but
591     // they can block and so write Raw memory.
592     if (store->is_Mach()) {
593       MachNode* mstore = store->as_Mach();
594       if (load_alias_idx != Compile::AliasIdxRaw) {
595         // Check for call into the runtime using the Java calling
596         // convention (and from there into a wrapper); it has no
597         // _method.  Can't do this optimization for Native calls because
598         // they CAN write to Java memory.
599         if (mstore->ideal_Opcode() == Op_CallStaticJava) {
600           assert(mstore->is_MachSafePoint(), "");
601           MachSafePointNode* ms = (MachSafePointNode*) mstore;
602           assert(ms->is_MachCallJava(), "");
603           MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
604           if (mcj->_method == NULL) {
605             // These runtime calls do not write to Java visible memory
606             // (other than Raw) and so do not require anti-dependence edges.
607             continue;
608           }
609         }
610         // Same for SafePoints: they read/write Raw but only read otherwise.
611         // This is basically a workaround for SafePoints only defining control
612         // instead of control + memory.
613         if (mstore->ideal_Opcode() == Op_SafePoint)
614           continue;
615       } else {
616         // Some raw memory, such as the load of "top" at an allocation,
617         // can be control dependent on the previous safepoint. See
618         // comments in GraphKit::allocate_heap() about control input.
619         // Inserting an anti-dep between such a safepoint and a use
620         // creates a cycle, and will cause a subsequent failure in
621         // local scheduling.  (BugId 4919904)
622         // (%%% How can a control input be a safepoint and not a projection??)
623         if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
624           continue;
625       }
626     }
627 
628     // Identify a block that the current load must be above,
629     // or else observe that 'store' is all the way up in the
630     // earliest legal block for 'load'.  In the latter case,
631     // immediately insert an anti-dependence edge.
632     Block* store_block = get_block_for_node(store);
633     assert(store_block != NULL, "unused killing projections skipped above");
634 
635     if (store->is_Phi()) {
636       // 'load' uses memory which is one (or more) of the Phi's inputs.
637       // It must be scheduled not before the Phi, but rather before
638       // each of the relevant Phi inputs.
639       //
640       // Instead of finding the LCA of all inputs to a Phi that match 'mem',
641       // we mark each corresponding predecessor block and do a combined
642       // hoisting operation later (raise_LCA_above_marks).
643       //
644       // Do not assert(store_block != early, "Phi merging memory after access")
645       // PhiNode may be at start of block 'early' with backedge to 'early'
646       DEBUG_ONLY(bool found_match = false);
647       for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
648         if (store->in(j) == mem) {   // Found matching input?
649           DEBUG_ONLY(found_match = true);
650           Block* pred_block = get_block_for_node(store_block->pred(j));
651           if (pred_block != early) {
652             // If any predecessor of the Phi matches the load's "early block",
653             // we do not need a precedence edge between the Phi and 'load'
654             // since the load will be forced into a block preceding the Phi.
655             pred_block->set_raise_LCA_mark(load_index);
656             assert(!LCA_orig->dominates(pred_block) ||
657                    early->dominates(pred_block), "early is high enough");
658             must_raise_LCA = true;
659           } else {
660             // anti-dependent upon PHI pinned below 'early', no edge needed
661             LCA = early;             // but can not schedule below 'early'
662           }
663         }
664       }
665       assert(found_match, "no worklist bug");
666 #ifdef TRACK_PHI_INPUTS
667 #ifdef ASSERT
668       // This assert asks about correct handling of PhiNodes, which may not
669       // have all input edges directly from 'mem'. See BugId 4621264
670       int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
671       // Increment by exactly one even if there are multiple copies of 'mem'
672       // coming into the phi, because we will run this block several times
673       // if there are several copies of 'mem'.  (That's how DU iterators work.)
674       phi_inputs.at_put(store->_idx, num_mem_inputs);
675       assert(PhiNode::Input + num_mem_inputs < store->req(),
676              "Expect at least one phi input will not be from original memory state");
677 #endif //ASSERT
678 #endif //TRACK_PHI_INPUTS
679     } else if (store_block != early) {
680       // 'store' is between the current LCA and earliest possible block.
681       // Label its block, and decide later on how to raise the LCA
682       // to include the effect on LCA of this store.
683       // If this store's block gets chosen as the raised LCA, we
684       // will find him on the non_early_stores list and stick him
685       // with a precedence edge.
686       // (But, don't bother if LCA is already raised all the way.)
687       if (LCA != early) {
688         store_block->set_raise_LCA_mark(load_index);
689         must_raise_LCA = true;
690         non_early_stores.push(store);
691       }
692     } else {
693       // Found a possibly-interfering store in the load's 'early' block.
694       // This means 'load' cannot sink at all in the dominator tree.
695       // Add an anti-dep edge, and squeeze 'load' into the highest block.
696       assert(store != load->in(0), "dependence cycle found");
697       if (verify) {
698         assert(store->find_edge(load) != -1, "missing precedence edge");
699       } else {
700         store->add_prec(load);
701       }
702       LCA = early;
703       // This turns off the process of gathering non_early_stores.
704     }
705   }
706   // (Worklist is now empty; all nearby stores have been visited.)
707 
708   // Finished if 'load' must be scheduled in its 'early' block.
709   // If we found any stores there, they have already been given
710   // precedence edges.
711   if (LCA == early)  return LCA;
712 
713   // We get here only if there are no possibly-interfering stores
714   // in the load's 'early' block.  Move LCA up above all predecessors
715   // which contain stores we have noted.
716   //
717   // The raised LCA block can be a home to such interfering stores,
718   // but its predecessors must not contain any such stores.
719   //
720   // The raised LCA will be a lower bound for placing the load,
721   // preventing the load from sinking past any block containing
722   // a store that may invalidate the memory state required by 'load'.
723   if (must_raise_LCA)
724     LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
725   if (LCA == early)  return LCA;
726 
727   // Insert anti-dependence edges from 'load' to each store
728   // in the non-early LCA block.
729   // Mine the non_early_stores list for such stores.
730   if (LCA->raise_LCA_mark() == load_index) {
731     while (non_early_stores.size() > 0) {
732       Node* store = non_early_stores.pop();
733       Block* store_block = get_block_for_node(store);
734       if (store_block == LCA) {
735         // add anti_dependence from store to load in its own block
736         assert(store != load->in(0), "dependence cycle found");
737         if (verify) {
738           assert(store->find_edge(load) != -1, "missing precedence edge");
739         } else {
740           store->add_prec(load);
741         }
742       } else {
743         assert(store_block->raise_LCA_mark() == load_index, "block was marked");
744         // Any other stores we found must be either inside the new LCA
745         // or else outside the original LCA.  In the latter case, they
746         // did not interfere with any use of 'load'.
747         assert(LCA->dominates(store_block)
748                || !LCA_orig->dominates(store_block), "no stray stores");
749       }
750     }
751   }
752 
753   // Return the highest block containing stores; any stores
754   // within that block have been given anti-dependence edges.
755   return LCA;
756 }
757 
758 // This class is used to iterate backwards over the nodes in the graph.
759 
760 class Node_Backward_Iterator {
761 
762 private:
763   Node_Backward_Iterator();
764 
765 public:
766   // Constructor for the iterator
767   Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
768 
  // Return the next node in the backward traversal, or NULL when done
770   Node *next();
771 
772 private:
773   VectorSet   &_visited;
774   Node_List   &_stack;
775   PhaseCFG &_cfg;
776 };
777 
778 // Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
780   : _visited(visited), _stack(stack), _cfg(cfg) {
781   // The stack should contain exactly the root
782   stack.clear();
783   stack.push(root);
784 
785   // Clear the visited bits
786   visited.Clear();
787 }
788 
// Iteration step for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {
791 
792   // If the _stack is empty, then just return NULL: finished.
793   if ( !_stack.size() )
794     return NULL;
795 
796   // '_stack' is emulating a real _stack.  The 'visit-all-users' loop has been
797   // made stateless, so I do not need to record the index 'i' on my _stack.
798   // Instead I visit all users each time, scanning for unvisited users.
799   // I visit unvisited not-anti-dependence users first, then anti-dependent
800   // children next.
801   Node *self = _stack.pop();
802 
803   // I cycle here when I am entering a deeper level of recursion.
804   // The key variable 'self' was set prior to jumping here.
805   while( 1 ) {
806 
807     _visited.set(self->_idx);
808 
809     // Now schedule all uses as late as possible.
810     const Node* src = self->is_Proj() ? self->in(0) : self;
811     uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
812 
813     // Schedule all nodes in a post-order visit
814     Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any
815 
816     // Scan for unvisited nodes
817     for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
818       // For all uses, schedule late
819       Node* n = self->fast_out(i); // Use
820 
821       // Skip already visited children
822       if ( _visited.test(n->_idx) )
823         continue;
824 
825       // do not traverse backward control edges
826       Node *use = n->is_Proj() ? n->in(0) : n;
827       uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
828 
829       if ( use_rpo < src_rpo )
830         continue;
831 
832       // Phi nodes always precede uses in a basic block
833       if ( use_rpo == src_rpo && use->is_Phi() )
834         continue;
835 
836       unvisited = n;      // Found unvisited
837 
838       // Check for possible-anti-dependent
839       if( !n->needs_anti_dependence_check() )
840         break;            // Not visited, not anti-dep; schedule it NOW
841     }
842 
843     // Did I find an unvisited not-anti-dependent Node?
844     if ( !unvisited )
845       break;                  // All done with children; post-visit 'self'
846 
847     // Visit the unvisited Node.  Contains the obvious push to
848     // indicate I'm entering a deeper level of recursion.  I push the
849     // old state onto the _stack and set a new state and loop (recurse).
850     _stack.push(self);
851     self = unvisited;
852   } // End recursion loop
853 
854   return self;
855 }
856 
857 //------------------------------ComputeLatenciesBackwards----------------------
858 // Compute the latency of all the instructions.
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
860 #ifndef PRODUCT
861   if (trace_opto_pipelining())
862     tty->print("\n#---- ComputeLatenciesBackwards ----\n");
863 #endif
864 
865   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
866   Node *n;
867 
868   // Walk over all the nodes from last to first
  while ((n = iter.next()) != NULL) {
870     // Set the latency for the definitions of this instruction
871     partial_latency_of_defs(n);
872   }
873 } // end ComputeLatenciesBackwards
874 
875 //------------------------------partial_latency_of_defs------------------------
876 // Compute the latency impact of this node on all defs.  This computes
877 // a number that increases as we approach the beginning of the routine.
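// In effect this is a longest-path relaxation over def->use edges:
//   latency(def) = MAX(latency(def), latency(use) + edge_latency(use, j))
// evaluated while walking the graph backwards, so defs feeding long chains
// of dependent work end up with larger latencies.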
void PhaseCFG::partial_latency_of_defs(Node *n) {
879   // Set the latency for this instruction
880 #ifndef PRODUCT
881   if (trace_opto_pipelining()) {
882     tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
883     dump();
884   }
885 #endif
886 
887   if (n->is_Proj()) {
888     n = n->in(0);
889   }
890 
891   if (n->is_Root()) {
892     return;
893   }
894 
895   uint nlen = n->len();
896   uint use_latency = get_latency_for_node(n);
897   uint use_pre_order = get_block_for_node(n)->_pre_order;
898 
899   for (uint j = 0; j < nlen; j++) {
900     Node *def = n->in(j);
901 
902     if (!def || def == n) {
903       continue;
904     }
905 
    // Walk backwards through projections
907     if (def->is_Proj()) {
908       def = def->in(0);
909     }
910 
911 #ifndef PRODUCT
912     if (trace_opto_pipelining()) {
913       tty->print("#    in(%2d): ", j);
914       def->dump();
915     }
916 #endif
917 
918     // If the defining block is not known, assume it is ok
919     Block *def_block = get_block_for_node(def);
920     uint def_pre_order = def_block ? def_block->_pre_order : 0;
921 
922     if ((use_pre_order <  def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
923       continue;
924     }
925 
926     uint delta_latency = n->latency(j);
927     uint current_latency = delta_latency + use_latency;
928 
929     if (get_latency_for_node(def) < current_latency) {
930       set_latency_for_node(def, current_latency);
931     }
932 
933 #ifndef PRODUCT
934     if (trace_opto_pipelining()) {
935       tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
936     }
937 #endif
938   }
939 }
940 
941 //------------------------------latency_from_use-------------------------------
942 // Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
944   // If self-reference, return no latency
945   if (use == n || use->is_Root()) {
946     return 0;
947   }
948 
949   uint def_pre_order = get_block_for_node(def)->_pre_order;
950   uint latency = 0;
951 
952   // If the use is not a projection, then it is simple...
953   if (!use->is_Proj()) {
954 #ifndef PRODUCT
955     if (trace_opto_pipelining()) {
956       tty->print("#    out(): ");
957       use->dump();
958     }
959 #endif
960 
961     uint use_pre_order = get_block_for_node(use)->_pre_order;
962 
963     if (use_pre_order < def_pre_order)
964       return 0;
965 
966     if (use_pre_order == def_pre_order && use->is_Phi())
967       return 0;
968 
969     uint nlen = use->len();
970     uint nl = get_latency_for_node(use);
971 
972     for ( uint j=0; j<nlen; j++ ) {
973       if (use->in(j) == n) {
974         // Change this if we want local latencies
975         uint ul = use->latency(j);
976         uint  l = ul + nl;
977         if (latency < l) latency = l;
978 #ifndef PRODUCT
979         if (trace_opto_pipelining()) {
980           tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
981                         nl, j, ul, l, latency);
982         }
983 #endif
984       }
985     }
986   } else {
987     // This is a projection, just grab the latency of the use(s)
988     for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
989       uint l = latency_from_use(use, def, use->fast_out(j));
990       if (latency < l) latency = l;
991     }
992   }
993 
994   return latency;
995 }
996 
997 //------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
999 // This computes a number that increases as we approach the beginning of the
1000 // routine.
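// This is the per-node counterpart of partial_latency_of_defs: take the
// maximum over all uses of (use latency + edge latency), looking through
// projections to the node that actually consumes the value.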
void PhaseCFG::latency_from_uses(Node *n) {
1002   // Set the latency for this instruction
1003 #ifndef PRODUCT
1004   if (trace_opto_pipelining()) {
1005     tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1006     dump();
1007   }
1008 #endif
1009   uint latency=0;
1010   const Node *def = n->is_Proj() ? n->in(0): n;
1011 
1012   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1013     uint l = latency_from_use(n, def, n->fast_out(i));
1014 
1015     if (latency < l) latency = l;
1016   }
1017 
1018   set_latency_for_node(n, latency);
1019 }
1020 
1021 //------------------------------hoist_to_cheaper_block-------------------------
1022 // Pick a block for node self, between early and LCA, that is a cheaper
1023 // alternative to LCA.
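// The loop below climbs the dominator tree from LCA toward 'early', keeping
// the block with the lowest execution frequency (_freq).  A candidate that is
// not strictly cheaper can still be chosen for latency reasons when its
// frequency is no worse than the current best (within the tiny 'delta'
// tolerance) and the node's latency target is still satisfied there.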
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
1025   const double delta = 1+PROB_UNLIKELY_MAG(4);
1026   Block* least       = LCA;
1027   double least_freq  = least->_freq;
1028   uint target        = get_latency_for_node(self);
1029   uint start_latency = get_latency_for_node(LCA->head());
1030   uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
1031   bool in_latency    = (target <= start_latency);
1032   const Block* root_block = get_block_for_node(_root);
1033 
1034   // Turn off latency scheduling if scheduling is just plain off
1035   if (!C->do_scheduling())
1036     in_latency = true;
1037 
1038   // Do not hoist (to cover latency) instructions which target a
1039   // single register.  Hoisting stretches the live range of the
1040   // single register and may force spilling.
1041   MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1042   if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
1043     in_latency = true;
1044 
1045 #ifndef PRODUCT
1046   if (trace_opto_pipelining()) {
1047     tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
1048     self->dump();
1049     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1050       LCA->_pre_order,
1051       LCA->head()->_idx,
1052       start_latency,
1053       LCA->get_node(LCA->end_idx())->_idx,
1054       end_latency,
1055       least_freq);
1056   }
1057 #endif
1058 
1059   int cand_cnt = 0;  // number of candidates tried
1060 
1061   // Walk up the dominator tree from LCA (Lowest common ancestor) to
1062   // the earliest legal location.  Capture the least execution frequency.
1063   while (LCA != early) {
1064     LCA = LCA->_idom;         // Follow up the dominator tree
1065 
1066     if (LCA == NULL) {
1067       // Bailout without retry
1068       assert(false, "graph should be schedulable");
1069       C->record_method_not_compilable("late schedule failed: LCA == NULL");
1070       return least;
1071     }
1072 
1073     // Don't hoist machine instructions to the root basic block
1074     if (mach && LCA == root_block)
1075       break;
1076 
1077     uint start_lat = get_latency_for_node(LCA->head());
1078     uint end_idx   = LCA->end_idx();
1079     uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
1080     double LCA_freq = LCA->_freq;
1081 #ifndef PRODUCT
1082     if (trace_opto_pipelining()) {
1083       tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1084         LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
1085     }
1086 #endif
1087     cand_cnt++;
1088     if (LCA_freq < least_freq              || // Better Frequency
1089         (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
1090          (!StressGCM                    &&    // Otherwise, choose with latency
1091           !in_latency                   &&    // No block containing latency
1092           LCA_freq < least_freq * delta &&    // No worse frequency
1093           target >= end_lat             &&    // within latency range
1094           !self->is_iteratively_computed() )  // But don't hoist IV increments
1095              // because they may end up above other uses of their phi forcing
1096              // their result register to be different from their input.
1097        ) {
1098       least = LCA;            // Found cheaper block
1099       least_freq = LCA_freq;
1100       start_latency = start_lat;
1101       end_latency = end_lat;
1102       if (target <= start_lat)
1103         in_latency = true;
1104     }
1105   }
1106 
1107 #ifndef PRODUCT
1108   if (trace_opto_pipelining()) {
1109     tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
1110       least->_pre_order, start_latency, least_freq);
1111   }
1112 #endif
1113 
1114   // See if the latency needs to be updated
1115   if (target < end_latency) {
1116 #ifndef PRODUCT
1117     if (trace_opto_pipelining()) {
1118       tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1119     }
1120 #endif
1121     set_latency_for_node(self, end_latency);
1122     partial_latency_of_defs(self);
1123   }
1124 
1125   return least;
1126 }
1127 
1128 
1129 //------------------------------schedule_late-----------------------------------
1130 // Now schedule all codes as LATE as possible.  This is the LCA in the
1131 // dominator tree of all USES of a value.  Pick the block with the least
1132 // loop nesting depth that is lowest in the dominator tree.
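// Roughly: for each node (visited so that uses are seen before their defs),
// compute the LCA of all use blocks, raise it further for anti-dependences
// if needed, then pick the cheapest block on the dominator-tree path between
// the earliest legal block and that LCA.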
1133 extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
1135 #ifndef PRODUCT
1136   if (trace_opto_pipelining())
1137     tty->print("\n#---- schedule_late ----\n");
1138 #endif
1139 
1140   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1141   Node *self;
1142 
1143   // Walk over all the nodes from last to first
  while ((self = iter.next()) != NULL) {
1145     Block* early = get_block_for_node(self); // Earliest legal placement
1146 
1147     if (self->is_top()) {
1148       // Top node goes in bb #2 with other constants.
1149       // It must be special-cased, because it has no out edges.
1150       early->add_inst(self);
1151       continue;
1152     }
1153 
1154     // No uses, just terminate
1155     if (self->outcnt() == 0) {
1156       assert(self->is_MachProj(), "sanity");
1157       continue;                   // Must be a dead machine projection
1158     }
1159 
1160     // If node is pinned in the block, then no scheduling can be done.
1161     if( self->pinned() )          // Pinned in block?
1162       continue;
1163 
1164     MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1165     if (mach) {
1166       switch (mach->ideal_Opcode()) {
1167       case Op_CreateEx:
1168         // Don't move exception creation
1169         early->add_inst(self);
1170         continue;
1171         break;
1172       case Op_CheckCastPP:
1173         // Don't move CheckCastPP nodes away from their input, if the input
1174         // is a rawptr (5071820).
1175         Node *def = self->in(1);
1176         if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
1177           early->add_inst(self);
1178 #ifdef ASSERT
1179           _raw_oops.push(def);
1180 #endif
1181           continue;
1182         }
1183         break;
1184       }
1185     }
1186 
1187     // Gather LCA of all uses
1188     Block *LCA = NULL;
1189     {
1190       for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1191         // For all uses, find LCA
1192         Node* use = self->fast_out(i);
1193         LCA = raise_LCA_above_use(LCA, use, self, this);
1194       }
1195     }  // (Hide defs of imax, i from rest of block.)
1196 
1197     // Place temps in the block of their use.  This isn't a
1198     // requirement for correctness but it reduces useless
1199     // interference between temps and other nodes.
1200     if (mach != NULL && mach->is_MachTemp()) {
1201       map_node_to_block(self, LCA);
1202       LCA->add_inst(self);
1203       continue;
1204     }
1205 
1206     // Check if 'self' could be anti-dependent on memory
1207     if (self->needs_anti_dependence_check()) {
1208       // Hoist LCA above possible-defs and insert anti-dependences to
1209       // defs in new LCA block.
1210       LCA = insert_anti_dependences(LCA, self);
1211     }
1212 
1213     if (early->_dom_depth > LCA->_dom_depth) {
1214       // Somehow the LCA has moved above the earliest legal point.
1215       // (One way this can happen is via memory_early_block.)
1216       if (C->subsume_loads() == true && !C->failing()) {
1217         // Retry with subsume_loads == false
1218         // If this is the first failure, the sentinel string will "stick"
1219         // to the Compile object, and the C2Compiler will see it and retry.
1220         C->record_failure(C2Compiler::retry_no_subsuming_loads());
1221       } else {
1222         // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1223         assert(false, "graph should be schedulable");
1224         C->record_method_not_compilable("late schedule failed: incorrect graph");
1225       }
1226       return;
1227     }
1228 
1229     // If there is no opportunity to hoist, then we're done.
1230     // In stress mode, try to hoist even the single operations.
1231     bool try_to_hoist = StressGCM || (LCA != early);
1232 
1233     // Must clone guys stay next to use; no hoisting allowed.
1234     // Also cannot hoist guys that alter memory or are otherwise not
1235     // allocatable (hoisting can make a value live longer, leading to
1236     // anti and output dependency problems which are normally resolved
1237     // by the register allocator giving everyone a different register).
1238     if (mach != NULL && must_clone[mach->ideal_Opcode()])
1239       try_to_hoist = false;
1240 
1241     Block* late = NULL;
1242     if (try_to_hoist) {
1243       // Now find the block with the least execution frequency.
1244       // Start at the latest schedule and work up to the earliest schedule
1245       // in the dominator tree.  Thus the Node will dominate all its uses.
1246       late = hoist_to_cheaper_block(LCA, early, self);
1247     } else {
1248       // Just use the LCA of the uses.
1249       late = LCA;
1250     }
1251 
1252     // Put the node into target block
1253     schedule_node_into_block(self, late);
1254 
1255 #ifdef ASSERT
1256     if (self->needs_anti_dependence_check()) {
1257       // since precedence edges are only inserted when we're sure they
1258       // are needed make sure that after placement in a block we don't
1259       // need any new precedence edges.
1260       verify_anti_dependences(late, self);
1261     }
1262 #endif
1263   } // Loop until all nodes have been visited
1264 
1265 } // end ScheduleLate
1266 
1267 //------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
1269   ResourceMark rm;
1270 
1271 #ifndef PRODUCT
1272   if (trace_opto_pipelining()) {
1273     tty->print("\n---- Start GlobalCodeMotion ----\n");
1274   }
1275 #endif
1276 
1277   // Initialize the node to block mapping for things on the proj_list
1278   for (uint i = 0; i < _matcher.number_of_projections(); i++) {
1279     unmap_node_from_block(_matcher.get_projection(i));
1280   }
1281 
1282   // Set the basic block for Nodes pinned into blocks
1283   Arena* arena = Thread::current()->resource_area();
1284   VectorSet visited(arena);
1285   schedule_pinned_nodes(visited);
1286 
1287   // Find the earliest Block any instruction can be placed in.  Some
1288   // instructions are pinned into Blocks.  Unpinned instructions can
1289   // appear in last block in which all their inputs occur.
1290   visited.Clear();
1291   Node_List stack(arena);
1292   // Pre-grow the list
1293   stack.map((C->live_nodes() >> 1) + 16, NULL);
1294   if (!schedule_early(visited, stack)) {
1295     // Bailout without retry
1296     C->record_method_not_compilable("early schedule failed");
1297     return;
1298   }
1299 
1300   // Build Def-Use edges.
1301   // Compute the latency information (via backwards walk) for all the
1302   // instructions in the graph
1303   _node_latency = new GrowableArray<uint>(); // resource_area allocation
1304 
1305   if (C->do_scheduling()) {
1306     compute_latencies_backwards(visited, stack);
1307   }
1308 
1309   // Now schedule all nodes as LATE as possible.  The latest legal position
1310   // is the LCA, in the dominator tree, of all USES of a value.  Pick the
1311   // block with the least loop nesting depth that is lowest in the dominator tree.
1312   // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
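  // For example, a value that is used only inside one arm of an 'if' will have
  // its LCA in that arm and can therefore sink into it, so the computation is
  // performed only when that path is actually taken.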
1313   schedule_late(visited, stack);
1314   if (C->failing()) {
1315     return;
1316   }
1317 
1318 #ifndef PRODUCT
1319   if (trace_opto_pipelining()) {
1320     tty->print("\n---- Detect implicit null checks ----\n");
1321   }
1322 #endif
1323 
1324   // Detect implicit-null-check opportunities.  Basically, find NULL checks
1325   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
1326   // I can generate a memory op if there is not one nearby.
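  // For example (schematically): an explicit test like
  //   "if (p == NULL) uncommon_trap(); else ... p->field ..."
  // can be replaced by the load of p->field alone; if p is NULL the load faults
  // and the signal handler dispatches to the uncommon trap instead of an explicit compare.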
1327   if (C->is_method_compilation()) {
1328     // By reversing the loop direction we get a very minor gain on mpegaudio.
1329     // Feel free to revert to a forward loop for clarity.
1330     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1331     for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1332       Node* proj = _matcher._null_check_tests[i];
1333       Node* val  = _matcher._null_check_tests[i + 1];
1334       Block* block = get_block_for_node(proj);
1335       implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
1336       // The implicit_null_check will only perform the transformation
1337       // if the null branch is truly uncommon, *and* it leads to an
1338       // uncommon trap.  Combined with the too_many_traps guards
1339       // above, this prevents SEGV storms reported in 6366351,
1340       // by recompiling offending methods without this optimization.
1341     }
1342   }
1343 
1344 #ifndef PRODUCT
1345   if (trace_opto_pipelining()) {
1346     tty->print("\n---- Start Local Scheduling ----\n");
1347   }
1348 #endif
1349 
1350   // Schedule locally.  Right now a simple topological sort.
1351   // Later, do a real latency aware scheduler.
1352   GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1353   visited.Clear();
1354   for (uint i = 0; i < number_of_blocks(); i++) {
1355     Block* block = get_block(i);
1356     if (!schedule_local(block, ready_cnt, visited)) {
1357       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1358         C->record_method_not_compilable("local schedule failed");
1359       }
1360       return;
1361     }
1362   }
1363 
1364   // If we inserted any instructions between a Call and its CatchNode,
1365   // clone those instructions on all paths below the Catch.
1366   for (uint i = 0; i < number_of_blocks(); i++) {
1367     Block* block = get_block(i);
1368     call_catch_cleanup(block);
1369   }
1370 
1371 #ifndef PRODUCT
1372   if (trace_opto_pipelining()) {
1373     tty->print("\n---- After GlobalCodeMotion ----\n");
1374     for (uint i = 0; i < number_of_blocks(); i++) {
1375       Block* block = get_block(i);
1376       block->dump();
1377     }
1378   }
1379 #endif
1380   // Dead: poison _node_latency so any later use is caught.
1381   _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
1382 }
1383 
1384 bool PhaseCFG::do_global_code_motion() {
1385 
1386   build_dominator_tree();
1387   if (C->failing()) {
1388     return false;
1389   }
1390 
1391   NOT_PRODUCT( C->verify_graph_edges(); )
1392 
1393   estimate_block_frequency();
1394 
1395   global_code_motion();
1396 
1397   if (C->failing()) {
1398     return false;
1399   }
1400 
1401   return true;
1402 }
1403 
1404 //------------------------------Estimate_Block_Frequency-----------------------
1405 // Estimate block frequencies based on IfNode probabilities.
1406 void PhaseCFG::estimate_block_frequency() {
1407 
1408   // Force conditional branches leading to uncommon traps to be unlikely,
1409   // not because we get to the uncommon_trap with less relative frequency,
1410   // but because an uncommon_trap typically causes a deopt, so we only get
1411   // there once.
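  // The worklist walk below starts from predecessors of the root block that
  // contain uncommon code and moves backwards through single-successor blocks;
  // when it reaches a two-way branch leading only to such a path, that branch
  // is marked unlikely via update_uncommon_branch().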
1412   if (C->do_freq_based_layout()) {
1413     Block_List worklist;
1414     Block* root_blk = get_block(0);
1415     for (uint i = 1; i < root_blk->num_preds(); i++) {
1416       Block *pb = get_block_for_node(root_blk->pred(i));
1417       if (pb->has_uncommon_code()) {
1418         worklist.push(pb);
1419       }
1420     }
1421     while (worklist.size() > 0) {
1422       Block* uct = worklist.pop();
1423       if (uct == get_root_block()) {
1424         continue;
1425       }
1426       for (uint i = 1; i < uct->num_preds(); i++) {
1427         Block *pb = get_block_for_node(uct->pred(i));
1428         if (pb->_num_succs == 1) {
1429           worklist.push(pb);
1430         } else if (pb->num_fall_throughs() == 2) {
1431           pb->update_uncommon_branch(uct);
1432         }
1433       }
1434     }
1435   }
1436 
1437   // Create the loop tree and calculate loop depth.
1438   _root_loop = create_loop_tree();
1439   _root_loop->compute_loop_depth(0);
1440 
1441   // Compute block frequency of each block, relative to a single loop entry.
1442   _root_loop->compute_freq();
1443 
1444   // Adjust all frequencies to be relative to a single method entry
1445   _root_loop->_freq = 1.0;
1446   _root_loop->scale_freq();
1447 
1448   // Save the outermost loop frequency for the LRG frequency threshold
1449   _outer_loop_frequency = _root_loop->outer_loop_freq();
1450 
1451   // force paths ending at uncommon traps to be infrequent
1452   if (!C->do_freq_based_layout()) {
1453     Block_List worklist;
1454     Block* root_blk = get_block(0);
1455     for (uint i = 1; i < root_blk->num_preds(); i++) {
1456       Block *pb = get_block_for_node(root_blk->pred(i));
1457       if (pb->has_uncommon_code()) {
1458         worklist.push(pb);
1459       }
1460     }
1461     while (worklist.size() > 0) {
1462       Block* uct = worklist.pop();
1463       uct->_freq = PROB_MIN;
1464       for (uint i = 1; i < uct->num_preds(); i++) {
1465         Block *pb = get_block_for_node(uct->pred(i));
1466         if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
1467           worklist.push(pb);
1468         }
1469       }
1470     }
1471   }
1472 
1473 #ifdef ASSERT
1474   for (uint i = 0; i < number_of_blocks(); i++) {
1475     Block* b = get_block(i);
1476     assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
1477   }
1478 #endif
1479 
1480 #ifndef PRODUCT
1481   if (PrintCFGBlockFreq) {
1482     tty->print_cr("CFG Block Frequencies");
1483     _root_loop->dump_tree();
1484     if (Verbose) {
1485       tty->print_cr("PhaseCFG dump");
1486       dump();
1487       tty->print_cr("Node dump");
1488       _root->dump(99999);
1489     }
1490   }
1491 #endif
1492 }
1493 
1494 //----------------------------create_loop_tree--------------------------------
1495 // Create a loop tree from the CFG
1496 CFGLoop* PhaseCFG::create_loop_tree() {
1497 
1498 #ifdef ASSERT
1499   assert(get_block(0) == get_root_block(), "first block should be root block");
1500   for (uint i = 0; i < number_of_blocks(); i++) {
1501     Block* block = get_block(i);
1502     // Check that the _loop fields are clear...we could clear them if not.
1503     assert(block->_loop == NULL, "clear _loop expected");
1504     // Sanity check that the RPO numbering is reflected in the _blocks array.
1505     // It doesn't have to be for the loop tree to be built, but if it is not,
1506     // then the blocks have been reordered since dominator-graph building,
1507     // which calls the RPO numbering into question.
1508     assert(block->_rpo == i, "unexpected reverse post order number");
1509   }
1510 #endif
1511 
1512   int idct = 0;
1513   CFGLoop* root_loop = new CFGLoop(idct++);
1514 
1515   Block_List worklist;
1516 
1517   // Assign blocks to loops
1518   for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
1519     Block* block = get_block(i);
1520 
1521     if (block->head()->is_Loop()) {
1522       Block* loop_head = block;
1523       assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1524       Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
1525       Block* tail = get_block_for_node(tail_n);
1526 
1527       // Defensively filter out Loop nodes for non-single-entry loops.
1528       // For all reasonable loops, the head occurs before the tail in RPO.
1529       if (i <= tail->_rpo) {
1530 
1531         // The tail and (recursive) predecessors of the tail
1532         // are made members of a new loop.
1533 
1534         assert(worklist.size() == 0, "nonempty worklist");
1535         CFGLoop* nloop = new CFGLoop(idct++);
1536         assert(loop_head->_loop == NULL, "just checking");
1537         loop_head->_loop = nloop;
1538         // Add to nloop so push_pred() will skip over inner loops
1539         nloop->add_member(loop_head);
1540         nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
1541 
1542         while (worklist.size() > 0) {
1543           Block* member = worklist.pop();
1544           if (member != loop_head) {
1545             for (uint j = 1; j < member->num_preds(); j++) {
1546               nloop->push_pred(member, j, worklist, this);
1547             }
1548           }
1549         }
1550       }
1551     }
1552   }
1553 
1554   // Create a member list for each loop consisting
1555   // of both blocks and (immediate child) loops.
1556   for (uint i = 0; i < number_of_blocks(); i++) {
1557     Block* block = get_block(i);
1558     CFGLoop* lp = block->_loop;
1559     if (lp == NULL) {
1560       // Not assigned to a loop. Add it to the method's pseudo loop.
1561       block->_loop = root_loop;
1562       lp = root_loop;
1563     }
1564     if (lp == root_loop || block != lp->head()) { // loop heads are already members
1565       lp->add_member(block);
1566     }
1567     if (lp != root_loop) {
1568       if (lp->parent() == NULL) {
1569         // Not a nested loop. Make it a child of the method's pseudo loop.
1570         root_loop->add_nested_loop(lp);
1571       }
1572       if (block == lp->head()) {
1573         // Add nested loop to member list of parent loop.
1574         lp->parent()->add_member(lp);
1575       }
1576     }
1577   }
1578 
1579   return root_loop;
1580 }
1581 
1582 //------------------------------push_pred--------------------------------------
1583 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
1584   Node* pred_n = blk->pred(i);
1585   Block* pred = cfg->get_block_for_node(pred_n);
1586   CFGLoop *pred_loop = pred->_loop;
1587   if (pred_loop == NULL) {
1588     // Filter out blocks for non-single-entry loops.
1589     // For all reasonable loops, the head occurs before the tail in RPO.
1590     if (pred->_rpo > head()->_rpo) {
1591       pred->_loop = this;
1592       worklist.push(pred);
1593     }
1594   } else if (pred_loop != this) {
1595     // Nested loop.
1596     while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
1597       pred_loop = pred_loop->_parent;
1598     }
1599     // Make pred's loop be a child
1600     if (pred_loop->_parent == NULL) {
1601       add_nested_loop(pred_loop);
1602       // Continue with loop entry predecessor.
1603       Block* pred_head = pred_loop->head();
1604       assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1605       assert(pred_head != head(), "loop head in only one loop");
1606       push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
1607     } else {
1608       assert(pred_loop->_parent == this && _parent == NULL, "just checking");
1609     }
1610   }
1611 }
1612 
1613 //------------------------------add_nested_loop--------------------------------
1614 // Make cl a child of the current loop in the loop tree.
1615 void CFGLoop::add_nested_loop(CFGLoop* cl) {
1616   assert(_parent == NULL, "no parent yet");
1617   assert(cl != this, "not my own parent");
1618   cl->_parent = this;
1619   CFGLoop* ch = _child;
1620   if (ch == NULL) {
1621     _child = cl;
1622   } else {
1623     while (ch->_sibling != NULL) { ch = ch->_sibling; }
1624     ch->_sibling = cl;
1625   }
1626 }
1627 
1628 //------------------------------compute_loop_depth-----------------------------
1629 // Store the loop depth in each CFGLoop object.
1630 // Recursively walk the children to do the same for them.
1631 void CFGLoop::compute_loop_depth(int depth) {
1632   _depth = depth;
1633   CFGLoop* ch = _child;
1634   while (ch != NULL) {
1635     ch->compute_loop_depth(depth + 1);
1636     ch = ch->_sibling;
1637   }
1638 }
1639 
1640 //------------------------------compute_freq-----------------------------------
1641 // Compute the frequency of each block and loop, relative to a single entry
1642 // into the dominating loop head.
1643 void CFGLoop::compute_freq() {
1644   // Bottom up traversal of loop tree (visit inner loops first.)
1645   // Set loop head frequency to 1.0, then transitively
1646   // compute frequency for all successors in the loop,
1647   // as well as for each exit edge.  Inner loops are
1648   // treated as single blocks with loop exit targets
1649   // as the successor blocks.
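  // For example, if the loop head ends in a 50/50 conditional branching to
  // blocks A and B which both rejoin at the loop tail, the head gets frequency
  // 1.0, A and B each get 0.5, and the tail accumulates 0.5 + 0.5 = 1.0.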
1650 
1651   // Nested loops first
1652   CFGLoop* ch = _child;
1653   while (ch != NULL) {
1654     ch->compute_freq();
1655     ch = ch->_sibling;
1656   }
1657   assert (_members.length() > 0, "no empty loops");
1658   Block* hd = head();
1659   hd->_freq = 1.0f;
1660   for (int i = 0; i < _members.length(); i++) {
1661     CFGElement* s = _members.at(i);
1662     float freq = s->_freq;
1663     if (s->is_block()) {
1664       Block* b = s->as_Block();
1665       for (uint j = 0; j < b->_num_succs; j++) {
1666         Block* sb = b->_succs[j];
1667         update_succ_freq(sb, freq * b->succ_prob(j));
1668       }
1669     } else {
1670       CFGLoop* lp = s->as_CFGLoop();
1671       assert(lp->_parent == this, "immediate child");
1672       for (int k = 0; k < lp->_exits.length(); k++) {
1673         Block* eb = lp->_exits.at(k).get_target();
1674         float prob = lp->_exits.at(k).get_prob();
1675         update_succ_freq(eb, freq * prob);
1676       }
1677     }
1678   }
1679 
1680   // For all loops other than the outer, "method" loop,
1681   // sum and normalize the exit probability. The "method" loop
1682   // should keep the initial exit probability of 1, so that
1683   // inner blocks do not get erroneously scaled.
1684   if (_depth != 0) {
1685     // Total the exit probabilities for this loop.
1686     float exits_sum = 0.0f;
1687     for (int i = 0; i < _exits.length(); i++) {
1688       exits_sum += _exits.at(i).get_prob();
1689     }
1690 
1691     // Normalize the exit probabilities. Until now, the
1692     // probabilities estimate the likelihood of exiting on
1693     // a single loop iteration; afterward, they estimate
1694     // the probability of exit per loop entry.
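    // For example, if a single iteration exits with probability 0.02 to block X
    // and 0.08 to block Y, exits_sum is 0.10 and the normalized per-entry
    // probabilities become 0.2 for X and 0.8 for Y.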
1695     for (int i = 0; i < _exits.length(); i++) {
1696       Block* et = _exits.at(i).get_target();
1697       float new_prob = 0.0f;
1698       if (_exits.at(i).get_prob() > 0.0f) {
1699         new_prob = _exits.at(i).get_prob() / exits_sum;
1700       }
1701       BlockProbPair bpp(et, new_prob);
1702       _exits.at_put(i, bpp);
1703     }
1704 
1705     // Save the total, but guard against unreasonable probability,
1706     // as the value is used to estimate the loop trip count.
1707     // An infinite trip count would blur relative block
1708     // frequencies.
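    // Under these bounds the estimated trip count is roughly 1/exits_sum, so an
    // exits_sum of 0.10 corresponds to about 10 iterations per loop entry.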
1709     if (exits_sum > 1.0f) exits_sum = 1.0f;
1710     if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
1711     _exit_prob = exits_sum;
1712   }
1713 }
1714 
1715 //------------------------------succ_prob-------------------------------------
1716 // Determine the probability of reaching successor 'i' from the receiver block.
1717 float Block::succ_prob(uint i) {
1718   int eidx = end_idx();
1719   Node *n = get_node(eidx);  // Get ending Node
1720 
1721   int op = n->Opcode();
1722   if (n->is_Mach()) {
1723     if (n->is_MachNullCheck()) {
1724       // Can only reach here if called after LCM (local scheduling). The
1725       // original Op_If is gone, so we attempt to infer the probability
1726       // from one or both of the successor blocks.
1727       assert(_num_succs == 2, "expecting 2 successors of a null check");
1728       // If either successor has only one predecessor, then the
1729       // probability estimate can be derived using the
1730       // relative frequency of the successor and this block.
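      // For example, if this block has frequency 1.0 and the non-null successor
      // (reached only from here) has frequency 0.999, the probability toward it
      // is estimated as 0.999 and the null path gets the remaining 0.001.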
1731       if (_succs[i]->num_preds() == 2) {
1732         return _succs[i]->_freq / _freq;
1733       } else if (_succs[1-i]->num_preds() == 2) {
1734         return 1 - (_succs[1-i]->_freq / _freq);
1735       } else {
1736         // Estimate using both successor frequencies
1737         float freq = _succs[i]->_freq;
1738         return freq / (freq + _succs[1-i]->_freq);
1739       }
1740     }
1741     op = n->as_Mach()->ideal_Opcode();
1742   }
1743 
1744 
1745   // Switch on branch type
1746   switch( op ) {
1747   case Op_CountedLoopEnd:
1748   case Op_If: {
1749     assert (i < 2, "just checking");
1750     // Conditionals pass on only part of their frequency
1751     float prob  = n->as_MachIf()->_prob;
1752     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
1753     // If succ[i] is the FALSE branch, invert path info
1754     if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
1755       return 1.0f - prob; // not taken
1756     } else {
1757       return prob; // taken
1758     }
1759   }
1760 
1761   case Op_Jump:
1762     // Divide the frequency between all successors evenly
1763     return 1.0f/_num_succs;
1764 
1765   case Op_Catch: {
1766     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1767     if (ci->_con == CatchProjNode::fall_through_index) {
1768       // Fall-thru path gets the lion's share.
1769       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
1770     } else {
1771       // Presume exceptional paths are equally unlikely
1772       return PROB_UNLIKELY_MAG(5);
1773     }
1774   }
1775 
1776   case Op_Root:
1777   case Op_Goto:
1778     // Pass frequency straight thru to target
1779     return 1.0f;
1780 
1781   case Op_NeverBranch:
1782     return 0.0f;
1783 
1784   case Op_TailCall:
1785   case Op_TailJump:
1786   case Op_Return:
1787   case Op_Halt:
1788   case Op_Rethrow:
1789     // Do not push out freq to root block
1790     return 0.0f;
1791 
1792   default:
1793     ShouldNotReachHere();
1794   }
1795 
1796   return 0.0f;
1797 }
1798 
1799 //------------------------------num_fall_throughs-----------------------------
1800 // Return the number of fall-through candidates for a block
1801 int Block::num_fall_throughs() {
1802   int eidx = end_idx();
1803   Node *n = get_node(eidx);  // Get ending Node
1804 
1805   int op = n->Opcode();
1806   if (n->is_Mach()) {
1807     if (n->is_MachNullCheck()) {
1808       // In theory either side can fall through; for simplicity's sake,
1809       // let's say only the false branch can for now.
1810       return 1;
1811     }
1812     op = n->as_Mach()->ideal_Opcode();
1813   }
1814 
1815   // Switch on branch type
1816   switch( op ) {
1817   case Op_CountedLoopEnd:
1818   case Op_If:
1819     return 2;
1820 
1821   case Op_Root:
1822   case Op_Goto:
1823     return 1;
1824 
1825   case Op_Catch: {
1826     for (uint i = 0; i < _num_succs; i++) {
1827       const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1828       if (ci->_con == CatchProjNode::fall_through_index) {
1829         return 1;
1830       }
1831     }
1832     return 0;
1833   }
1834 
1835   case Op_Jump:
1836   case Op_NeverBranch:
1837   case Op_TailCall:
1838   case Op_TailJump:
1839   case Op_Return:
1840   case Op_Halt:
1841   case Op_Rethrow:
1842     return 0;
1843 
1844   default:
1845     ShouldNotReachHere();
1846   }
1847 
1848   return 0;
1849 }
1850 
1851 //------------------------------succ_fall_through-----------------------------
1852 // Return true if a specific successor could be fall-through target.
1853 bool Block::succ_fall_through(uint i) {
1854   int eidx = end_idx();
1855   Node *n = get_node(eidx);  // Get ending Node
1856 
1857   int op = n->Opcode();
1858   if (n->is_Mach()) {
1859     if (n->is_MachNullCheck()) {
1860       // In theory either side can fall through; for simplicity's sake,
1861       // let's say only the false branch can for now.
1862       return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
1863     }
1864     op = n->as_Mach()->ideal_Opcode();
1865   }
1866 
1867   // Switch on branch type
1868   switch( op ) {
1869   case Op_CountedLoopEnd:
1870   case Op_If:
1871   case Op_Root:
1872   case Op_Goto:
1873     return true;
1874 
1875   case Op_Catch: {
1876     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
1877     return ci->_con == CatchProjNode::fall_through_index;
1878   }
1879 
1880   case Op_Jump:
1881   case Op_NeverBranch:
1882   case Op_TailCall:
1883   case Op_TailJump:
1884   case Op_Return:
1885   case Op_Halt:
1886   case Op_Rethrow:
1887     return false;
1888 
1889   default:
1890     ShouldNotReachHere();
1891   }
1892 
1893   return false;
1894 }
1895 
1896 //------------------------------update_uncommon_branch------------------------
1897 // Update the probability of a two-branch to be uncommon
1898 void Block::update_uncommon_branch(Block* ub) {
1899   int eidx = end_idx();
1900   Node *n = get_node(eidx);  // Get ending Node
1901 
1902   int op = n->as_Mach()->ideal_Opcode();
1903 
1904   assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
1905   assert(num_fall_throughs() == 2, "must be a two way branch block");
1906 
1907   // Which successor is ub?
1908   uint s;
1909   for (s = 0; s < _num_succs; s++) {
1910     if (_succs[s] == ub) break;
1911   }
1912   assert(s < 2, "uncommon successor must be found");
1913 
1914   // If ub is the true path, make the probability small; otherwise
1915   // ub is the false path, so make the probability large.
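  // For example, with an initial _prob of 0.5: if ub is on the true path the
  // probability is clamped down to PROB_MIN; if ub is on the false path the
  // stored probability becomes 1 - PROB_MIN instead.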
1916   bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
1917 
1918   // Get existing probability
1919   float p = n->as_MachIf()->_prob;
1920 
1921   if (invert) p = 1.0 - p;
1922   if (p > PROB_MIN) {
1923     p = PROB_MIN;
1924   }
1925   if (invert) p = 1.0 - p;
1926 
1927   n->as_MachIf()->_prob = p;
1928 }
1929 
1930 //------------------------------update_succ_freq-------------------------------
1931 // Update the appropriate frequency associated with block 'b', a successor of
1932 // a block in this loop.
1933 void CFGLoop::update_succ_freq(Block* b, float freq) {
1934   if (b->_loop == this) {
1935     if (b == head()) {
1936       // Back branch within the loop.
1937       // Do nothing now; the loop-carried frequency will be
1938       // adjusted later in scale_freq().
1939     } else {
1940       // simple branch within the loop
1941       b->_freq += freq;
1942     }
1943   } else if (!in_loop_nest(b)) {
1944     // branch is exit from this loop
1945     BlockProbPair bpp(b, freq);
1946     _exits.append(bpp);
1947   } else {
1948     // branch into nested loop
1949     CFGLoop* ch = b->_loop;
1950     ch->_freq += freq;
1951   }
1952 }
1953 
1954 //------------------------------in_loop_nest-----------------------------------
1955 // Determine if block b is in the receiver's loop nest.
1956 bool CFGLoop::in_loop_nest(Block* b) {
1957   int depth = _depth;
1958   CFGLoop* b_loop = b->_loop;
1959   int b_depth = b_loop->_depth;
1960   if (depth == b_depth) {
1961     return true;
1962   }
1963   while (b_depth > depth) {
1964     b_loop = b_loop->_parent;
1965     b_depth = b_loop->_depth;
1966   }
1967   return b_loop == this;
1968 }
1969 
1970 //------------------------------scale_freq-------------------------------------
1971 // Scale frequency of loops and blocks by trip counts from outer loops
1972 // Do a top down traversal of loop tree (visit outer loops first.)
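// For example, a loop entered with frequency 2 that is estimated to run about
// 5 iterations per entry has loop_freq 2 * 5 = 10, so a member block with a
// per-iteration frequency of 0.5 ends up with an overall frequency of 5.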
1973 void CFGLoop::scale_freq() {
1974   float loop_freq = _freq * trip_count();
1975   _freq = loop_freq;
1976   for (int i = 0; i < _members.length(); i++) {
1977     CFGElement* s = _members.at(i);
1978     float block_freq = s->_freq * loop_freq;
1979     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
1980       block_freq = MIN_BLOCK_FREQUENCY;
1981     s->_freq = block_freq;
1982   }
1983   CFGLoop* ch = _child;
1984   while (ch != NULL) {
1985     ch->scale_freq();
1986     ch = ch->_sibling;
1987   }
1988 }
1989 
1990 // Frequency of outer loop
1991 float CFGLoop::outer_loop_freq() const {
1992   if (_child != NULL) {
1993     return _child->_freq;
1994   }
1995   return _freq;
1996 }
1997 
1998 #ifndef PRODUCT
1999 //------------------------------dump_tree--------------------------------------
2000 void CFGLoop::dump_tree() const {
2001   dump();
2002   if (_child != NULL)   _child->dump_tree();
2003   if (_sibling != NULL) _sibling->dump_tree();
2004 }
2005 
2006 //------------------------------dump-------------------------------------------
2007 void CFGLoop::dump() const {
2008   for (int i = 0; i < _depth; i++) tty->print("   ");
2009   tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
2010              _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
2011   for (int i = 0; i < _depth; i++) tty->print("   ");
2012   tty->print("         members:");
2013   int k = 0;
2014   for (int i = 0; i < _members.length(); i++) {
2015     if (k++ >= 6) {
2016       tty->print("\n              ");
2017       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2018       k = 0;
2019     }
2020     CFGElement *s = _members.at(i);
2021     if (s->is_block()) {
2022       Block *b = s->as_Block();
2023       tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
2024     } else {
2025       CFGLoop* lp = s->as_CFGLoop();
2026       tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
2027     }
2028   }
2029   tty->print("\n");
2030   for (int i = 0; i < _depth; i++) tty->print("   ");
2031   tty->print("         exits:  ");
2032   k = 0;
2033   for (int i = 0; i < _exits.length(); i++) {
2034     if (k++ >= 7) {
2035       tty->print("\n              ");
2036       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2037       k = 0;
2038     }
2039     Block *blk = _exits.at(i).get_target();
2040     float prob = _exits.at(i).get_prob();
2041     tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
2042   }
2043   tty->print("\n");
2044 }
2045 #endif
2046