/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/chaitin.hpp"
#include "runtime/deoptimization.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// To avoid float value underflow
#define MIN_BLOCK_FREQUENCY 1.e-35f
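// (Frequencies are products of branch probabilities, so deeply nested control
// flow can drive them toward zero; clamping at 1e-35 keeps values comfortably
// above the smallest normal float, roughly 1.2e-38.)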

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n, Add n to b,
  map_node_to_block(n, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.  Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = get_block_for_node(use);
      if (buse != b) {              // In wrong block?
        if (buse != NULL) {
          buse->find_remove(use);   // Remove from wrong block
        }
        map_node_to_block(use, b);
        b->add_inst(use);
      }
    }
  }
}

//----------------------------replace_block_proj_ctrl-------------------------
// Nodes that have is_block_proj() nodes as their control need to use
// the appropriate Region for their actual block as their control since
// the projection will be in a predecessor block.
void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  const Node *in0 = n->in(0);
  assert(in0 != NULL, "Only control-dependent");
  const Node *p = in0->is_block_proj();
  if (p != NULL && p != n) {    // Control from a block projection?
    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    // Find trailing Region
    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    uint j = 0;
    if (pb->_num_succs != 1) {  // More than 1 successor?
      // Search for successor
      uint max = pb->number_of_nodes();
      assert( max > 1, "" );
      uint start = max - pb->_num_succs;
      // Find which output path belongs to projection
      for (j = start; j < max; j++) {
        if( pb->get_node(j) == in0 )
          break;
      }
      assert( j < max, "must find" );
      // Change control to match head of successor basic block
      j -= start;
    }
    n->set_req(0, pb->_succs[j]->head());
  }
}

bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
  assert(is_CFG(node) && is_CFG(dom_node), "node and dom_node must be CFG nodes");
  if (dom_node == node) {
    return true;
  }
  Block* d = find_block_for_node(dom_node);
  Block* n = find_block_for_node(node);
  assert(n != NULL && d != NULL, "blocks must exist");

  if (d == n) {
    if (dom_node->is_block_start()) {
      return true;
    }
    if (node->is_block_start()) {
      return false;
    }
    if (dom_node->is_block_proj()) {
      return false;
    }
    if (node->is_block_proj()) {
      return true;
    }

    assert(is_control_proj_or_safepoint(node), "node must be control projection or safepoint");
    assert(is_control_proj_or_safepoint(dom_node), "dom_node must be control projection or safepoint");

    // Neither 'node' nor 'dom_node' is a block start or block projection.
    // Check if 'dom_node' is above 'node' in the control graph.
    if (is_dominating_control(dom_node, node)) {
      return true;
    }

#ifdef ASSERT
    // If 'dom_node' does not dominate 'node' then 'node' has to dominate 'dom_node'
    if (!is_dominating_control(node, dom_node)) {
      node->dump();
      dom_node->dump();
      assert(false, "neither dom_node nor node dominates the other");
    }
#endif

    return false;
  }
  return d->dom_lca(n) == d;
}

bool PhaseCFG::is_CFG(Node* n) {
  return n->is_block_proj() || n->is_block_start() || is_control_proj_or_safepoint(n);
}

bool PhaseCFG::is_control_proj_or_safepoint(Node* n) const {
  bool result = (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) || (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL);
  assert(!result || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint)
         || (n->is_Proj() && n->as_Proj()->_con == 0), "If control projection, it must be projection 0");
  return result;
}

Block* PhaseCFG::find_block_for_node(Node* n) const {
  if (n->is_block_start() || n->is_block_proj()) {
    return get_block_for_node(n);
  } else {
    // Walk the control graph up if 'n' is not a block start nor a block projection. In this case 'n' must be
    // an unmatched control projection or a not yet matched safepoint precedence edge in the middle of a block.
    assert(is_control_proj_or_safepoint(n), "must be control projection or safepoint");
    Node* ctrl = n->in(0);
    while (!ctrl->is_block_start()) {
      ctrl = ctrl->in(0);
    }
    return get_block_for_node(ctrl);
  }
}

// Walk up the control graph from 'n' and check if 'dom_ctrl' is found.
bool PhaseCFG::is_dominating_control(Node* dom_ctrl, Node* n) {
  Node* ctrl = n->in(0);
  while (!ctrl->is_block_start()) {
    if (ctrl == dom_ctrl) {
      return true;
    }
    ctrl = ctrl->in(0);
  }
  return false;
}


//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
  GrowableArray <Node*> spstack(C->live_nodes() + 8);
  spstack.push(_root);
  while (spstack.is_nonempty()) {
    Node* node = spstack.pop();
    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
        assert(node->in(0), "pinned Node must have Control");
        // Before setting block replace block_proj control edge
        replace_block_proj_ctrl(node);
        Node* input = node->in(0);
        while (!input->is_block_start()) {
          input = input->in(0);
        }
        Block* block = get_block_for_node(input); // Basic block of controlling input
        schedule_node_into_block(node, block);
      }

      // If the node has precedence edges (added when CastPP nodes are
      // removed in final_graph_reshaping), fix the control of the
      // node to cover the precedence edges and remove the
      // dependencies.
      Node* n = NULL;
      for (uint i = node->len()-1; i >= node->req(); i--) {
        Node* m = node->in(i);
        if (m == NULL) continue;

        // Only process precedence edges that are CFG nodes. Safepoints and control projections can be in the middle of a block
        if (is_CFG(m)) {
          node->rm_prec(i);
          if (n == NULL) {
            n = m;
          } else {
            assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
            n = is_dominator(n, m) ? m : n;
          }
        } else {
          assert(node->is_Mach(), "sanity");
          assert(node->as_Mach()->ideal_Opcode() == Op_StoreCM, "must be StoreCM node");
        }
      }
      if (n != NULL) {
        assert(node->in(0), "control should have been set");
        assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
        if (!is_dominator(n, node->in(0))) {
          node->set_req(0, n);
        }
      }

      // process all inputs that are non-NULL
      for (int i = node->req()-1; i >= 0; --i) {
        if (node->in(i) != NULL) {
          spstack.push(node->in(i));
        }
      }
    }
  }
}

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL)  continue;  // Ignore NULL, missing inputs
      Block* inb = cfg->get_block_for_node(inn);
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = cfg->get_block_for_node(inn);
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
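// (Illustrative) For example, a node whose inputs are defined in blocks B2 and
// B5, where B2 dominates B5, gets B5 as its earliest legal block: the deepest
// input block in the dominator tree, as computed by find_deepest_input().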
bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.size() + 8);
  // _root will be processed among C->top() inputs
  roots.push(C->top(), 0);
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node* parent_node = roots.node();
    uint  input_index = 0;
    roots.pop();

    while (true) {
      if (input_index == 0) {
        // Fixup some control.  Constants without control get attached
        // to root and nodes that use is_block_proj() nodes should be attached
        // to the region that starts their block.
        const Node* control_input = parent_node->in(0);
        if (control_input != NULL) {
          replace_block_proj_ctrl(parent_node);
        } else {
          // Is a constant with NO inputs?
          if (parent_node->req() == 1) {
            parent_node->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).

      // Assume all n's inputs will be processed
      bool done = true;

      while (input_index < parent_node->len()) {
        Node* in = parent_node->in(input_index++);
        if (in == NULL) {
          continue;
        }

        int is_visited = visited.test_set(in->_idx);
        if (!has_block(in)) {
          if (is_visited) {
            assert(false, "graph should be schedulable");
            return false;
          }
          // Save parent node and next input's index.
          nstack.push(parent_node, input_index);
          // Process current input now.
          parent_node = in;
          input_index = 0;
          // Not all n's inputs processed.
          done = false;
          break;
        } else if (!is_visited) {
          // Visit this guy later, using worklist
          roots.push(in, 0);
        }
      }

      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!parent_node->pinned()) {
          // Set earliest legal block.
          Block* earliest_block = find_deepest_input(parent_node, this);
          map_node_to_block(parent_node, earliest_block);
        } else {
          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        parent_node = nstack.node();
        input_index = nstack.index();
        nstack.pop();
      }
    }
  }
  return true;
}

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
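// (Illustrative) With idom chain B1 <- B2 <- B3 and B1 <- B4, the LCA of B3
// (depth 3) and B4 (depth 2) is found by first walking B3 up to depth 2, then
// stepping both pointers up in lock-step until they meet at B1.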
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.  If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
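// (Illustrative) If the use is a Phi merging preds P1..P3 and only the inputs
// arriving from P1 and P3 are this def, the LCA is raised above P1 and P3;
// P2's path places no constraint on where the def may be scheduled.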
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  Block* buse = cfg->get_block_for_node(use);
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why does not this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = cfg->get_block_for_node(buse->pred(j));
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
      if (LCA == mid)
        continue; // Don't mark as visited to avoid early termination.
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
        worklist.push(mid_parent);
      }
    }
    mid->set_raise_LCA_visited(mark);
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
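// (Illustrative) A load can have extra non-memory, non-address inputs that
// push its schedule_early block deeper in the dominator tree; considering only
// the address and memory inputs yields a shallower starting block, so the
// anti-dependence search does not miss a store between the two.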
static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

// This function is used by insert_anti_dependences to find unrelated loads for stores in implicit null checks.
bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
  // We expect an anti-dependence edge from 'load' to 'store', except when
  // implicit_null_check() has hoisted 'store' above its early block to
  // perform an implicit null check, and 'load' is placed in the null
  // block. In this case it is safe to ignore the anti-dependence, as the
  // null block is only reached if 'store' tries to write to a null object,
  // while 'load' reads from a non-null object (there is a preceding check
  // for that). These objects can't be the same.
  Block* store_block = get_block_for_node(store);
  Block* load_block = get_block_for_node(load);
  Node* end = store_block->end();
  if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) {
    Node* if_true = end->find_out_with(Op_IfTrue);
    assert(if_true != NULL, "null check without null projection");
    Node* null_block_region = if_true->find_out_with(Op_Region);
    assert(null_block_region != NULL, "null check without null region");
    return get_block_for_node(null_block_region) == load_block;
  }
  return false;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  int load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       (PrintMiscellaneous && (WizardMode || Verbose)))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
         "String equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
         "String indexOf is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOfChar),
         "String indexOfChar is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
         "Arrays equals is a 'load' that does not conflict with any stores");
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_HasNegatives),
         "HasNegatives is a 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined by
  // the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = get_block_for_node(load);

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different from the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, this);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
  // prevent the load from sinking.  For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.  When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem    // root (exclusive) of tree we are searching
        || op == Op_MergeMem    // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.  Wide MemBar's are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // Loop-phis need to raise load before input. (Other phis are treated
      // as store below.)
      //
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = get_block_for_node(store_block->pred(j));
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          } else {
            // anti-dependent upon PHI pinned below 'early', no edge needed
            LCA = early;             // but cannot schedule below 'early'
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'.  (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early && !unrelated_load_in_store_null_block(store, load)) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1 || unrelated_load_in_store_null_block(store, load),
               "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block.  Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = get_block_for_node(store);
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA.  In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}

// This class is used to iterate backwards over the nodes in the graph.

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);

  // Return the next node in backward order, or NULL when the walk is done
  Node *next();

private:
  VectorSet  &_visited;
  Node_Stack &_stack;
  PhaseCFG   &_cfg;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
  : _visited(visited), _stack(stack), _cfg(cfg) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root, root->outcnt());

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next. I iterate backwards to support removal of nodes.
  // The stack holds states consisting of 3 values:
  // current Def node, flag which indicates 1st/2nd pass, index of current out edge
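  // (The pass flag is packed into bit 0 of the stacked Node pointer; node
  // pointers are at least 2-byte aligned, so the low bit is free to serve
  // as a tag and is masked back off when the entry is popped.)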
  Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
  bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
  uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
  _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    const Node* src = self->is_Proj() ? self->in(0) : self;
    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    while (idx > 0) {
      // For all uses, schedule late
      Node* n = self->raw_out(--idx); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;      // Found unvisited

      // Check for possible-anti-dependent
      // 1st pass: No such nodes, 2nd pass: Only such nodes.
      if (n->needs_anti_dependence_check() == iterate_anti_dep) {
        unvisited = n;    // Found unvisited
        break;
      }
    }

    // Did I find an unvisited not-anti-dependent Node?
    if (!unvisited) {
      if (!iterate_anti_dep) {
        // 2nd pass: Iterate over nodes which needs_anti_dependence_check.
        iterate_anti_dep = true;
        idx = self->outcnt();
        continue;
      }
      break;                  // All done with children; post-visit 'self'
    }

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
    self = unvisited;
    iterate_anti_dep = false;
    idx = self->outcnt();
  } // End recursion loop

  return self;
}

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  Node *n;

  // Walk over all the nodes from last to first
  while ((n = iter.next())) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif

  if (n->is_Proj()) {
    n = n->in(0);
  }

  if (n->is_Root()) {
    return;
  }

  uint nlen = n->len();
  uint use_latency = get_latency_for_node(n);
  uint use_pre_order = get_block_for_node(n)->_pre_order;

  for (uint j = 0; j < nlen; j++) {
    Node *def = n->in(j);

    if (!def || def == n) {
      continue;
    }

    // Walk backwards thru projections
    if (def->is_Proj()) {
      def = def->in(0);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = get_block_for_node(def);
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
      continue;
    }

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (get_latency_for_node(def) < current_latency) {
      set_latency_for_node(def, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
    }
#endif
  }
}

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root()) {
    return 0;
  }

  uint def_pre_order = get_block_for_node(def)->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = get_block_for_node(use)->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = get_latency_for_node(use);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  set_latency_for_node(n, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
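  // (delta is a tie-breaking tolerance: a candidate block counts as "no worse"
  // below if its frequency is within this tiny factor of the best seen so far.)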
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = get_latency_for_node(self);
  uint start_latency = get_latency_for_node(LCA->head());
  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
  bool in_latency    = (target <= start_latency);
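  // (in_latency means the latency target is already satisfied at the start of
  // the LCA block, so latency-driven hoisting has nothing left to gain.)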
  const Block* root_block = get_block_for_node(_root);

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
    self->dump();
    tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                  LCA->_pre_order,
                  LCA->head()->_idx,
                  start_latency,
                  LCA->get_node(LCA->end_idx())->_idx,
                  end_latency,
                  least_freq);
  }
#endif

  int cand_cnt = 0;  // number of candidates tried

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.  Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      assert(false, "graph should be schedulable");
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = get_latency_for_node(LCA->head());
    uint end_idx   = LCA->end_idx();
    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                    LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    cand_cnt++;
    if (LCA_freq < least_freq              || // Better Frequency
        (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
        (!StressGCM                    &&    // Otherwise, choose with latency
         !in_latency                   &&    // No block containing latency
         LCA_freq < least_freq * delta &&    // No worse frequency
         target >= end_lat             &&    // within latency range
         !self->is_iteratively_computed() )  // But don't hoist IV increments
            // because they may end up above other uses of their phi forcing
            // their result register to be different from their input.
       ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
                  least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    set_latency_for_node(self, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}
1273
1274
1275 //------------------------------schedule_late-----------------------------------
1276 // Now schedule all codes as LATE as possible. This is the LCA in the
1277 // dominator tree of all USES of a value. Pick the block with the least
1278 // loop nesting depth that is lowest in the dominator tree.
1279 extern const char must_clone[];
schedule_late(VectorSet & visited,Node_Stack & stack)1280 void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
1281 #ifndef PRODUCT
1282 if (trace_opto_pipelining())
1283 tty->print("\n#---- schedule_late ----\n");
1284 #endif
1285
1286 Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1287 Node *self;
1288
1289 // Walk over all the nodes from last to first
1290 while ((self = iter.next())) {
1291 Block* early = get_block_for_node(self); // Earliest legal placement
1292
1293 if (self->is_top()) {
1294 // Top node goes in bb #2 with other constants.
1295 // It must be special-cased, because it has no out edges.
1296 early->add_inst(self);
1297 continue;
1298 }
1299
1300 // No uses, just terminate
1301 if (self->outcnt() == 0) {
1302 assert(self->is_MachProj(), "sanity");
1303 continue; // Must be a dead machine projection
1304 }
1305
1306 // If node is pinned in the block, then no scheduling can be done.
1307 if( self->pinned() ) // Pinned in block?
1308 continue;
1309
1310 MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1311 if (mach) {
1312 switch (mach->ideal_Opcode()) {
1313 case Op_CreateEx:
1314 // Don't move exception creation
1315 early->add_inst(self);
1316 continue;
1317 break;
1318 case Op_CheckCastPP: {
1319 // Don't move CheckCastPP nodes away from their input, if the input
1320 // is a rawptr (5071820).
1321 Node *def = self->in(1);
1322 if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
1323 early->add_inst(self);
1324 #ifdef ASSERT
1325 _raw_oops.push(def);
1326 #endif
1327 continue;
1328 }
1329 break;
1330 }
1331 default:
1332 break;
1333 }
1334 if (C->has_irreducible_loop() && self->bottom_type()->has_memory()) {
1335 // If the CFG is irreducible, keep memory-writing nodes as close as
1336 // possible to their original block (given by the control input). This
1337 // prevents PhaseCFG::hoist_to_cheaper_block() from placing such nodes
1338 // into descendants of their original loop, as in the following example:
1339 //
1340 // Original placement of store in B1 (loop L1):
1341 //
1342 // B1 (L1):
1343 // m1 <- ..
1344 // m2 <- store m1, ..
1345 // B2 (L2):
1346 // jump B2
1347 // B3 (L1):
1348 // .. <- .. m2, ..
1349 //
1350 // Wrong "hoisting" of store to B2 (in loop L2, child of L1):
1351 //
1352 // B1 (L1):
1353 // m1 <- ..
1354 // B2 (L2):
1355 // m2 <- store m1, ..
1356 // # Wrong: m1 and m2 interfere at this point.
1357 // jump B2
1358 // B3 (L1):
1359 // .. <- .. m2, ..
1360 //
1361 // This "hoist inversion" can happen due to CFGLoop::compute_freq()'s
1362 // inaccurate estimation of frequencies for irreducible CFGs, which can
1363 // lead to for example assigning B1 and B3 a higher frequency than B2.
1364 #ifndef PRODUCT
1365 if (trace_opto_pipelining()) {
1366 tty->print_cr("# Irreducible loops: schedule in earliest block B%d:",
1367 early->_pre_order);
1368 self->dump();
1369 }
1370 #endif
1371 schedule_node_into_block(self, early);
1372 continue;
1373 }
1374 }
1375
1376 // Gather LCA of all uses
1377 Block *LCA = NULL;
1378 {
1379 for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1380 // For all uses, find LCA
1381 Node* use = self->fast_out(i);
1382 LCA = raise_LCA_above_use(LCA, use, self, this);
1383 }
1384 guarantee(LCA != NULL, "There must be a LCA");
1385 } // (Hide defs of imax, i from rest of block.)
1386
1387 // Place temps in the block of their use. This isn't a
1388 // requirement for correctness but it reduces useless
1389 // interference between temps and other nodes.
1390 if (mach != NULL && mach->is_MachTemp()) {
1391 map_node_to_block(self, LCA);
1392 LCA->add_inst(self);
1393 continue;
1394 }
1395
1396 // Check if 'self' could be anti-dependent on memory
1397 if (self->needs_anti_dependence_check()) {
1398 // Hoist LCA above possible-defs and insert anti-dependences to
1399 // defs in new LCA block.
1400 LCA = insert_anti_dependences(LCA, self);
1401 }
1402
1403 if (early->_dom_depth > LCA->_dom_depth) {
1404 // Somehow the LCA has moved above the earliest legal point.
1405 // (One way this can happen is via memory_early_block.)
1406 if (C->subsume_loads() == true && !C->failing()) {
1407 // Retry with subsume_loads == false
1408 // If this is the first failure, the sentinel string will "stick"
1409 // to the Compile object, and the C2Compiler will see it and retry.
1410 C->record_failure(C2Compiler::retry_no_subsuming_loads());
1411 } else {
1412 // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1413 assert(false, "graph should be schedulable");
1414 C->record_method_not_compilable("late schedule failed: incorrect graph");
1415 }
1416 return;
1417 }

    // If there is no opportunity to hoist, then we're done.
    // In stress mode, try to hoist even single operations.
    bool try_to_hoist = StressGCM || (LCA != early);

    // Nodes that must be cloned stay next to their use; no hoisting allowed.
    // Also cannot hoist nodes that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree. Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }
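
    // hoist_to_cheaper_block() considers only blocks on the dominator-tree
    // path from the LCA up to 'early'; every candidate dominates the LCA,
    // so whichever block is picked still dominates all uses.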

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // Since precedence edges are only inserted when we're sure they
      // are needed, make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate

//------------------------------GlobalCodeMotion-------------------------------
void PhaseCFG::global_code_motion() {
  ResourceMark rm;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start GlobalCodeMotion ----\n");
  }
#endif

  // Initialize the node to block mapping for things on the proj_list
  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
    unmap_node_from_block(_matcher.get_projection(i));
  }

  // Set the basic block for Nodes pinned into blocks
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  schedule_pinned_nodes(visited);

  // Find the earliest Block any instruction can be placed in. Some
  // instructions are pinned into Blocks. Unpinned instructions can
  // appear in the last block in which all their inputs occur.
  visited.Clear();
  Node_Stack stack(arena, (C->live_nodes() >> 2) + 16); // pre-grow
  if (!schedule_early(visited, stack)) {
    // Bailout without retry
    C->record_method_not_compilable("early schedule failed");
    return;
  }
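
  // Note: the earliest legal block for an unpinned node is the deepest
  // (in the dominator tree) of its inputs' blocks; all of the node's
  // inputs then dominate that block.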

  // Build Def-Use edges.
  // Compute the latency information (via backwards walk) for all the
  // instructions in the graph
  _node_latency = new GrowableArray<uint>(); // resource_area allocation

  if (C->do_scheduling()) {
    compute_latencies_backwards(visited, stack);
  }

  // Now schedule all nodes as LATE as possible. This is the LCA in the
  // dominator tree of all USES of a value. Pick the block with the least
  // loop nesting depth that is lowest in the dominator tree.
  // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  schedule_late(visited, stack);
  if (C->failing()) {
    return;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Detect implicit null checks ----\n");
  }
#endif

  // Detect implicit-null-check opportunities. Basically, find NULL checks
  // with suitable memory ops nearby. Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  if (C->is_method_compilation()) {
    // By reversing the loop direction we get a very minor gain on mpegaudio.
    // Feel free to revert to a forward loop for clarity.
    // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
      Node* proj = _matcher._null_check_tests[i];
      Node* val  = _matcher._null_check_tests[i + 1];
      Block* block = get_block_for_node(proj);
      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
      // The implicit_null_check will only perform the transformation
      // if the null branch is truly uncommon, *and* it leads to an
      // uncommon trap. Combined with the too_many_traps guards
      // above, this prevents SEGV storms reported in 6366351,
      // by recompiling offending methods without this optimization.
    }
  }

  bool block_size_threshold_ok = false;
  intptr_t *recalc_pressure_nodes = NULL;
  if (OptoRegScheduling) {
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      if (block->number_of_nodes() > 10) {
        block_size_threshold_ok = true;
        break;
      }
    }
  }

  // Register-pressure-aware local scheduling is enabled only when
  // OptoRegScheduling is set and at least one block exceeds the size
  // threshold found above.
  PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
  ResourceArea live_arena(mtCompiler);      // Arena for liveness
  ResourceMark rm_live(&live_arena);
  PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
  PhaseIFG ifg(&live_arena);
  if (OptoRegScheduling && block_size_threshold_ok) {
    regalloc.mark_ssa();
    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
    rm_live.reset_to_mark();                // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    uint node_size = regalloc._lrg_map.max_lrg_id();
    ifg.init(node_size);                    // Empty IFG
    regalloc.set_ifg(ifg);
    regalloc.set_live(live);
    regalloc.gather_lrg_masks(false);       // Collect LRG masks
    live.compute(node_size);                // Compute liveness

    recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
    for (uint i = 0; i < node_size; i++) {
      recalc_pressure_nodes[i] = 0;
    }
  }
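
  // The setup above gives schedule_local() what it needs to track register
  // pressure: SSA-style live ranges (mark_ssa), their register masks, and
  // per-block liveness. recalc_pressure_nodes is scratch state for the
  // in-block pressure recalculation.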
  _regalloc = &regalloc;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- Start Local Scheduling ----\n");
  }
#endif

  // Schedule locally. Right now a simple topological sort.
  // Later, do a real latency aware scheduler.
  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  visited.Clear();
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
      if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
        C->record_method_not_compilable("local schedule failed");
      }
      _regalloc = NULL;
      return;
    }
  }
  _regalloc = NULL;

  // If we inserted any instructions between a Call and its CatchNode,
  // clone the instructions on all paths below the Catch.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    call_catch_cleanup(block);
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("\n---- After GlobalCodeMotion ----\n");
    for (uint i = 0; i < number_of_blocks(); i++) {
      Block* block = get_block(i);
      block->dump();
    }
  }
#endif
  // Dead: poison the pointer so any later use of _node_latency fails fast.
  _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
}

bool PhaseCFG::do_global_code_motion() {

  build_dominator_tree();
  if (C->failing()) {
    return false;
  }

  NOT_PRODUCT( C->verify_graph_edges(); )

  estimate_block_frequency();

  global_code_motion();

  if (C->failing()) {
    return false;
  }

  return true;
}

//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
void PhaseCFG::estimate_block_frequency() {

  // Force conditional branches leading to uncommon traps to be unlikely,
  // not because we get to the uncommon_trap with less relative frequency,
  // but because an uncommon_trap typically causes a deopt, so we only get
  // there once.
  if (C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      if (uct == get_root_block()) {
        continue;
      }
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1) {
          worklist.push(pb);
        } else if (pb->num_fall_throughs() == 2) {
          pb->update_uncommon_branch(uct);
        }
      }
    }
  }

  // Create the loop tree and calculate loop depth.
  _root_loop = create_loop_tree();
  _root_loop->compute_loop_depth(0);

  // Compute block frequency of each block, relative to a single loop entry.
  _root_loop->compute_freq();

  // Adjust all frequencies to be relative to a single method entry
  _root_loop->_freq = 1.0;
  _root_loop->scale_freq();

  // Save outermost loop frequency for LRG frequency threshold
  _outer_loop_frequency = _root_loop->outer_loop_freq();

  // Force paths ending at uncommon traps to be infrequent
  if (!C->do_freq_based_layout()) {
    Block_List worklist;
    Block* root_blk = get_block(0);
    for (uint i = 1; i < root_blk->num_preds(); i++) {
      Block *pb = get_block_for_node(root_blk->pred(i));
      if (pb->has_uncommon_code()) {
        worklist.push(pb);
      }
    }
    while (worklist.size() > 0) {
      Block* uct = worklist.pop();
      uct->_freq = PROB_MIN;
      for (uint i = 1; i < uct->num_preds(); i++) {
        Block *pb = get_block_for_node(uct->pred(i));
        if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
          worklist.push(pb);
        }
      }
    }
  }

#ifdef ASSERT
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* b = get_block(i);
    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  }
#endif

#ifndef PRODUCT
  if (PrintCFGBlockFreq) {
    tty->print_cr("CFG Block Frequencies");
    _root_loop->dump_tree();
    if (Verbose) {
      tty->print_cr("PhaseCFG dump");
      dump();
      tty->print_cr("Node dump");
      _root->dump(99999);
    }
  }
#endif
}

//----------------------------create_loop_tree--------------------------------
// Create a loop tree from the CFG
CFGLoop* PhaseCFG::create_loop_tree() {

#ifdef ASSERT
  assert(get_block(0) == get_root_block(), "first block should be root block");
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    // Check that the _loop fields are clear...we could clear them if not.
    assert(block->_loop == NULL, "clear _loop expected");
    // Sanity check that the RPO numbering is reflected in the _blocks array.
    // It doesn't have to be for the loop tree to be built, but if it is not,
    // then the blocks have been reordered since dom graph building...which
    // may invalidate the RPO numbering.
    assert(block->_rpo == i, "unexpected reverse post order number");
  }
#endif

  int idct = 0;
  CFGLoop* root_loop = new CFGLoop(idct++);

  Block_List worklist;

  // Assign blocks to loops
  for (uint i = number_of_blocks() - 1; i > 0; i--) { // skip Root block
    Block* block = get_block(i);

    if (block->head()->is_Loop()) {
      Block* loop_head = block;
      assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
      Block* tail = get_block_for_node(tail_n);

      // Defensively filter out Loop nodes for non-single-entry loops.
      // For all reasonable loops, the head occurs before the tail in RPO.
      if (i <= tail->_rpo) {

        // The tail and (recursive) predecessors of the tail
        // are made members of a new loop.

        assert(worklist.size() == 0, "nonempty worklist");
        CFGLoop* nloop = new CFGLoop(idct++);
        assert(loop_head->_loop == NULL, "just checking");
        loop_head->_loop = nloop;
        // Add to nloop so push_pred() will skip over inner loops
        nloop->add_member(loop_head);
        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);

        while (worklist.size() > 0) {
          Block* member = worklist.pop();
          if (member != loop_head) {
            for (uint j = 1; j < member->num_preds(); j++) {
              nloop->push_pred(member, j, worklist, this);
            }
          }
        }
      }
    }
  }

  // Create a member list for each loop consisting
  // of both blocks and (immediate child) loops.
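  // For example (with illustrative block names): for a method with a single
  // loop L1 whose blocks are B2 and B3 (B2 the head), the method's pseudo
  // loop gets members {B1, L1, B4} and L1 gets members {B2, B3}. Each block
  // appears in exactly one member list, and each loop appears once in its
  // parent's member list.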
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    CFGLoop* lp = block->_loop;
    if (lp == NULL) {
      // Not assigned to a loop. Add it to the method's pseudo loop.
      block->_loop = root_loop;
      lp = root_loop;
    }
    if (lp == root_loop || block != lp->head()) { // loop heads are already members
      lp->add_member(block);
    }
    if (lp != root_loop) {
      if (lp->parent() == NULL) {
        // Not a nested loop. Make it a child of the method's pseudo loop.
        root_loop->add_nested_loop(lp);
      }
      if (block == lp->head()) {
        // Add nested loop to member list of parent loop.
        lp->parent()->add_member(lp);
      }
    }
  }

  return root_loop;
}

//------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  Node* pred_n = blk->pred(i);
  Block* pred = cfg->get_block_for_node(pred_n);
  CFGLoop *pred_loop = pred->_loop;
  if (pred_loop == NULL) {
    // Filter out blocks for non-single-entry loops.
    // For all reasonable loops, the head occurs before the tail in RPO.
    if (pred->_rpo > head()->_rpo) {
      pred->_loop = this;
      worklist.push(pred);
    }
  } else if (pred_loop != this) {
    // Nested loop.
    while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
      pred_loop = pred_loop->_parent;
    }
    // Make pred's loop be a child
    if (pred_loop->_parent == NULL) {
      add_nested_loop(pred_loop);
      // Continue with loop entry predecessor.
      Block* pred_head = pred_loop->head();
      assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
      assert(pred_head != head(), "loop head in only one loop");
      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
    } else {
      assert(pred_loop->_parent == this && _parent == NULL, "just checking");
    }
  }
}

//------------------------------add_nested_loop--------------------------------
// Make cl a child of the current loop in the loop tree.
void CFGLoop::add_nested_loop(CFGLoop* cl) {
  assert(_parent == NULL, "no parent yet");
  assert(cl != this, "not my own parent");
  cl->_parent = this;
  CFGLoop* ch = _child;
  if (ch == NULL) {
    _child = cl;
  } else {
    while (ch->_sibling != NULL) { ch = ch->_sibling; }
    ch->_sibling = cl;
  }
}

//------------------------------compute_loop_depth-----------------------------
// Store the loop depth in each CFGLoop object.
// Recursively walk the children to do the same for them.
void CFGLoop::compute_loop_depth(int depth) {
  _depth = depth;
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_loop_depth(depth + 1);
    ch = ch->_sibling;
  }
}

//------------------------------compute_freq-----------------------------------
// Compute the frequency of each block and loop, relative to a single entry
// into the dominating loop head.
void CFGLoop::compute_freq() {
  // Bottom up traversal of loop tree (visit inner loops first.)
  // Set loop head frequency to 1.0, then transitively
  // compute frequency for all successors in the loop,
  // as well as for each exit edge. Inner loops are
  // treated as single blocks with loop exit targets
  // as the successor blocks.

  // Nested loops first
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->compute_freq();
    ch = ch->_sibling;
  }
  assert(_members.length() > 0, "no empty loops");
  Block* hd = head();
  hd->_freq = 1.0;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double freq = s->_freq;
    if (s->is_block()) {
      Block* b = s->as_Block();
      for (uint j = 0; j < b->_num_succs; j++) {
        Block* sb = b->_succs[j];
        update_succ_freq(sb, freq * b->succ_prob(j));
      }
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      assert(lp->_parent == this, "immediate child");
      for (int k = 0; k < lp->_exits.length(); k++) {
        Block* eb = lp->_exits.at(k).get_target();
        double prob = lp->_exits.at(k).get_prob();
        update_succ_freq(eb, freq * prob);
      }
    }
  }

  // For all loops other than the outer, "method" loop,
  // sum and normalize the exit probability. The "method" loop
  // should keep the initial exit probability of 1, so that
  // inner blocks do not get erroneously scaled.
  if (_depth != 0) {
    // Total the exit probabilities for this loop.
    double exits_sum = 0.0;
    for (int i = 0; i < _exits.length(); i++) {
      exits_sum += _exits.at(i).get_prob();
    }

    // Normalize the exit probabilities. Until now, the
    // probabilities estimate the possibility of exit per
    // a single loop iteration; afterward, they estimate
    // the probability of exit per loop entry.
    for (int i = 0; i < _exits.length(); i++) {
      Block* et = _exits.at(i).get_target();
      float new_prob = 0.0f;
      if (_exits.at(i).get_prob() > 0.0f) {
        new_prob = _exits.at(i).get_prob() / exits_sum;
      }
      BlockProbPair bpp(et, new_prob);
      _exits.at_put(i, bpp);
    }
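
    // Example: with two exits whose per-iteration probabilities are 0.05
    // and 0.15, exits_sum is 0.2, and the exits normalize to 0.25 and
    // 0.75, the probability of leaving through each exit per loop entry.
    // A per-iteration exit probability of 0.2 corresponds to an expected
    // trip count of 1/0.2 = 5.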

    // Save the total, but guard against unreasonable probability,
    // as the value is used to estimate the loop trip count.
    // An infinite trip count would blur relative block
    // frequencies.
    if (exits_sum > 1.0) exits_sum = 1.0;
    if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
    _exit_prob = exits_sum;
  }
}

//------------------------------succ_prob-------------------------------------
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // Can only reach here if called after local code motion (LCM). The
      // original Op_If is gone, so we attempt to infer the probability
      // from one or both of the successor blocks.
      assert(_num_succs == 2, "expecting 2 successors of a null check");
      // If either successor has only one predecessor, then the
      // probability estimate can be derived using the
      // relative frequency of the successor and this block.
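      // For example, if this block has frequency 1.0 and successor i
      // (whose only predecessor is this block) has frequency 0.9, the
      // inferred probability of reaching successor i is 0.9 / 1.0 = 0.9.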
      if (_succs[i]->num_preds() == 2) {
        return _succs[i]->_freq / _freq;
      } else if (_succs[1-i]->num_preds() == 2) {
        return 1 - (_succs[1-i]->_freq / _freq);
      } else {
        // Estimate using both successor frequencies
        float freq = _succs[i]->_freq;
        return freq / (freq + _succs[1-i]->_freq);
      }
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch (op) {
  case Op_CountedLoopEnd:
  case Op_If: {
    assert(i < 2, "just checking");
    // Conditionals pass on only part of their frequency
    float prob = n->as_MachIf()->_prob;
    assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
    // If succ[i] is the FALSE branch, invert path info
    if (get_node(i + eidx + 1)->Opcode() == Op_IfFalse) {
      return 1.0f - prob; // not taken
    } else {
      return prob; // taken
    }
  }

  case Op_Jump:
    return n->as_MachJump()->_probs[get_node(i + eidx + 1)->as_JumpProj()->_con];

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    if (ci->_con == CatchProjNode::fall_through_index) {
      // Fall-thru path gets the lion's share.
      return 1.0f - PROB_UNLIKELY_MAG(5) * _num_succs;
    } else {
      // Presume exceptional paths are equally unlikely
      return PROB_UNLIKELY_MAG(5);
    }
  }

  case Op_Root:
  case Op_Goto:
    // Pass frequency straight thru to target
    return 1.0f;

  case Op_NeverBranch:
    return 0.0f;

  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    // Do not push out freq to root block
    return 0.0f;

  default:
    ShouldNotReachHere();
  }

  return 0.0f;
}

//------------------------------num_fall_throughs-----------------------------
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can.
      return 1;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch (op) {
  case Op_CountedLoopEnd:
  case Op_If:
    return 2;

  case Op_Root:
  case Op_Goto:
    return 1;

  case Op_Catch: {
    for (uint i = 0; i < _num_succs; i++) {
      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
      if (ci->_con == CatchProjNode::fall_through_index) {
        return 1;
      }
    }
    return 0;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return 0;

  default:
    ShouldNotReachHere();
  }

  return 0;
}

//------------------------------succ_fall_through-----------------------------
// Return true if a specific successor could be a fall-through target.
bool Block::succ_fall_through(uint i) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->Opcode();
  if (n->is_Mach()) {
    if (n->is_MachNullCheck()) {
      // In theory, either side can fall through; for simplicity's sake,
      // let's say only the false branch can.
      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
    }
    op = n->as_Mach()->ideal_Opcode();
  }

  // Switch on branch type
  switch (op) {
  case Op_CountedLoopEnd:
  case Op_If:
  case Op_Root:
  case Op_Goto:
    return true;

  case Op_Catch: {
    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
    return ci->_con == CatchProjNode::fall_through_index;
  }

  case Op_Jump:
  case Op_NeverBranch:
  case Op_TailCall:
  case Op_TailJump:
  case Op_Return:
  case Op_Halt:
  case Op_Rethrow:
    return false;

  default:
    ShouldNotReachHere();
  }

  return false;
}

//------------------------------update_uncommon_branch------------------------
// Update the probability of a two-way branch to be uncommon
void Block::update_uncommon_branch(Block* ub) {
  int eidx = end_idx();
  Node *n = get_node(eidx);  // Get ending Node

  int op = n->as_Mach()->ideal_Opcode();

  assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  assert(num_fall_throughs() == 2, "must be a two way branch block");

  // Which successor is ub?
  uint s;
  for (s = 0; s < _num_succs; s++) {
    if (_succs[s] == ub) break;
  }
  assert(s < 2, "uncommon successor must be found");

  // If ub is on the true path, make the probability small; otherwise
  // ub is on the false path, so make the probability large.
  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);

  // Get existing probability
  float p = n->as_MachIf()->_prob;

  if (invert) p = 1.0 - p;
  if (p > PROB_MIN) {
    p = PROB_MIN;
  }
  if (invert) p = 1.0 - p;

  n->as_MachIf()->_prob = p;
}

//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, double freq) {
  if (b->_loop == this) {
    if (b == head()) {
      // Back branch within the loop.
      // Do nothing now; the loop-carried frequency will be
      // adjusted later in scale_freq().
    } else {
      // Simple branch within the loop.
      b->_freq += freq;
    }
  } else if (!in_loop_nest(b)) {
    // The branch is an exit from this loop.
    BlockProbPair bpp(b, freq);
    _exits.append(bpp);
  } else {
    // Branch into a nested loop.
    CFGLoop* ch = b->_loop;
    ch->_freq += freq;
  }
}

//------------------------------in_loop_nest-----------------------------------
// Determine if block b is in the receiver's loop nest.
bool CFGLoop::in_loop_nest(Block* b) {
  int depth = _depth;
  CFGLoop* b_loop = b->_loop;
  int b_depth = b_loop->_depth;
  if (depth == b_depth) {
    return true;
  }
  while (b_depth > depth) {
    b_loop = b_loop->_parent;
    b_depth = b_loop->_depth;
  }
  return b_loop == this;
}

//------------------------------scale_freq-------------------------------------
// Scale frequency of loops and blocks by trip counts from outer loops
// Do a top down traversal of loop tree (visit outer loops first.)
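// For example (assuming each inner loop is entered on every iteration of
// its parent), a loop with an estimated trip count of 5 scales its member
// frequencies by 5, and a nested loop with trip count 4 inside it ends up
// with member frequencies of roughly 5 * 4 = 20 relative to method entry.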
void CFGLoop::scale_freq() {
  double loop_freq = _freq * trip_count();
  _freq = loop_freq;
  for (int i = 0; i < _members.length(); i++) {
    CFGElement* s = _members.at(i);
    double block_freq = s->_freq * loop_freq;
    if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
      block_freq = MIN_BLOCK_FREQUENCY;
    s->_freq = block_freq;
  }
  CFGLoop* ch = _child;
  while (ch != NULL) {
    ch->scale_freq();
    ch = ch->_sibling;
  }
}

// Frequency of the outermost loop (the method frequency if there are no loops)
double CFGLoop::outer_loop_freq() const {
  if (_child != NULL) {
    return _child->_freq;
  }
  return _freq;
}

#ifndef PRODUCT
//------------------------------dump_tree--------------------------------------
void CFGLoop::dump_tree() const {
  dump();
  if (_child != NULL)   _child->dump_tree();
  if (_sibling != NULL) _sibling->dump_tree();
}

//------------------------------dump-------------------------------------------
void CFGLoop::dump() const {
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
             _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("         members:");
  int k = 0;
  for (int i = 0; i < _members.length(); i++) {
    if (k++ >= 6) {
      tty->print("\n              ");
      for (int j = 0; j < _depth+1; j++) tty->print("   ");
      k = 0;
    }
    CFGElement *s = _members.at(i);
    if (s->is_block()) {
      Block *b = s->as_Block();
      tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
    } else {
      CFGLoop* lp = s->as_CFGLoop();
      tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
    }
  }
  tty->print("\n");
  for (int i = 0; i < _depth; i++) tty->print("   ");
  tty->print("         exits:  ");
  k = 0;
  for (int i = 0; i < _exits.length(); i++) {
    if (k++ >= 7) {
      tty->print("\n              ");
      for (int j = 0; j < _depth+1; j++) tty->print("   ");
      k = 0;
    }
    Block *blk = _exits.at(i).get_target();
    double prob = _exits.at(i).get_prob();
    tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  }
  tty->print("\n");
}
#endif