1 /*
2 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/callnode.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/connode.hpp"
34 #include "opto/castnode.hpp"
35 #include "opto/divnode.hpp"
36 #include "opto/loopnode.hpp"
37 #include "opto/matcher.hpp"
38 #include "opto/mulnode.hpp"
39 #include "opto/movenode.hpp"
40 #include "opto/opaquenode.hpp"
41 #include "opto/rootnode.hpp"
42 #include "opto/subnode.hpp"
43 #include "opto/subtypenode.hpp"
44 #include "utilities/macros.hpp"
45
46 //=============================================================================
47 //------------------------------split_thru_phi---------------------------------
48 // Split Node 'n' through merge point if there is enough win.
split_thru_phi(Node * n,Node * region,int policy)49 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
50 if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
51 // ConvI2L may have type information on it which is unsafe to push up
52 // so disable this for now
53 return NULL;
54 }
55
56 // Splitting range check CastIIs through a loop induction Phi can
57 // cause new Phis to be created that are left unrelated to the loop
58 // induction Phi and prevent optimizations (vectorization)
59 if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
60 n->in(1) == region->as_CountedLoop()->phi()) {
61 return NULL;
62 }
63
64 // Bail out if 'n' is a Div or Mod node whose zero check was removed earlier (i.e. control is NULL) and its divisor is an induction variable
65 // phi p of a trip-counted (integer) loop whose inputs could be zero (include zero in their type range). p could have a more precise type
66 // range that does not necessarily include all values of its inputs. Since each of these inputs will be a divisor of the newly cloned nodes
67 // of 'n', we need to bail out of one of these divisors could be zero (zero in its type range).
68 if ((n->Opcode() == Op_DivI || n->Opcode() == Op_ModI) && n->in(0) == NULL
69 && region->is_CountedLoop() && n->in(2) == region->as_CountedLoop()->phi()) {
70 Node* phi = region->as_CountedLoop()->phi();
71 for (uint i = 1; i < phi->req(); i++) {
72 if (_igvn.type(phi->in(i))->filter_speculative(TypeInt::ZERO) != Type::TOP) {
73 // Zero could be a possible value but we already removed the zero check. Bail out to avoid a possible division by zero at a later point.
74 return NULL;
75 }
76 }
77 }
78
79 int wins = 0;
80 assert(!n->is_CFG(), "");
81 assert(region->is_Region(), "");
82
83 const Type* type = n->bottom_type();
84 const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
85 Node* phi;
86 if (t_oop != NULL && t_oop->is_known_instance_field()) {
87 int iid = t_oop->instance_id();
88 int index = C->get_alias_index(t_oop);
89 int offset = t_oop->offset();
90 phi = new PhiNode(region, type, NULL, iid, index, offset);
91 } else {
92 phi = PhiNode::make_blank(region, n);
93 }
94 uint old_unique = C->unique();
95 for (uint i = 1; i < region->req(); i++) {
96 Node* x;
97 Node* the_clone = NULL;
98 if (region->in(i) == C->top()) {
99 x = C->top(); // Dead path? Use a dead data op
100 } else {
101 x = n->clone(); // Else clone up the data op
102 the_clone = x; // Remember for possible deletion.
103 // Alter data node to use pre-phi inputs
104 if (n->in(0) == region)
105 x->set_req( 0, region->in(i) );
106 for (uint j = 1; j < n->req(); j++) {
107 Node* in = n->in(j);
108 if (in->is_Phi() && in->in(0) == region)
109 x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
110 }
111 }
112 // Check for a 'win' on some paths
113 const Type* t = x->Value(&_igvn);
114
115 bool singleton = t->singleton();
116
117 // A TOP singleton indicates that there are no possible values incoming
118 // along a particular edge. In most cases, this is OK, and the Phi will
119 // be eliminated later in an Ideal call. However, we can't allow this to
120 // happen if the singleton occurs on loop entry, as the elimination of
121 // the PhiNode may cause the resulting node to migrate back to a previous
122 // loop iteration.
123 if (singleton && t == Type::TOP) {
124 // Is_Loop() == false does not confirm the absence of a loop (e.g., an
125 // irreducible loop may not be indicated by an affirmative is_Loop());
126 // therefore, the only top we can split thru a phi is on a backedge of
127 // a loop.
128 singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
129 }
130
131 if (singleton) {
132 wins++;
133 x = ((PhaseGVN&)_igvn).makecon(t);
134 } else {
135 // We now call Identity to try to simplify the cloned node.
136 // Note that some Identity methods call phase->type(this).
137 // Make sure that the type array is big enough for
138 // our new node, even though we may throw the node away.
139 // (Note: This tweaking with igvn only works because x is a new node.)
140 _igvn.set_type(x, t);
141 // If x is a TypeNode, capture any more-precise type permanently into Node
142 // otherwise it will be not updated during igvn->transform since
143 // igvn->type(x) is set to x->Value() already.
144 x->raise_bottom_type(t);
145 Node* y = x->Identity(&_igvn);
146 if (y != x) {
147 wins++;
148 x = y;
149 } else {
150 y = _igvn.hash_find(x);
151 if (y) {
152 wins++;
153 x = y;
154 } else {
155 // Else x is a new node we are keeping
156 // We do not need register_new_node_with_optimizer
157 // because set_type has already been called.
158 _igvn._worklist.push(x);
159 }
160 }
161 }
162 if (x != the_clone && the_clone != NULL)
163 _igvn.remove_dead_node(the_clone);
164 phi->set_req( i, x );
165 }
166 // Too few wins?
167 if (wins <= policy) {
168 _igvn.remove_dead_node(phi);
169 return NULL;
170 }
171
172 // Record Phi
173 register_new_node( phi, region );
174
175 for (uint i2 = 1; i2 < phi->req(); i2++) {
176 Node *x = phi->in(i2);
177 // If we commoned up the cloned 'x' with another existing Node,
178 // the existing Node picks up a new use. We need to make the
179 // existing Node occur higher up so it dominates its uses.
180 Node *old_ctrl;
181 IdealLoopTree *old_loop;
182
183 if (x->is_Con()) {
184 // Constant's control is always root.
185 set_ctrl(x, C->root());
186 continue;
187 }
188 // The occasional new node
189 if (x->_idx >= old_unique) { // Found a new, unplaced node?
190 old_ctrl = NULL;
191 old_loop = NULL; // Not in any prior loop
192 } else {
193 old_ctrl = get_ctrl(x);
194 old_loop = get_loop(old_ctrl); // Get prior loop
195 }
196 // New late point must dominate new use
197 Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
198 if (new_ctrl == old_ctrl) // Nothing is changed
199 continue;
200
201 IdealLoopTree *new_loop = get_loop(new_ctrl);
202
203 // Don't move x into a loop if its uses are
204 // outside of loop. Otherwise x will be cloned
205 // for each use outside of this loop.
206 IdealLoopTree *use_loop = get_loop(region);
207 if (!new_loop->is_member(use_loop) &&
208 (old_loop == NULL || !new_loop->is_member(old_loop))) {
209 // Take early control, later control will be recalculated
210 // during next iteration of loop optimizations.
211 new_ctrl = get_early_ctrl(x);
212 new_loop = get_loop(new_ctrl);
213 }
214 // Set new location
215 set_ctrl(x, new_ctrl);
216 // If changing loop bodies, see if we need to collect into new body
217 if (old_loop != new_loop) {
218 if (old_loop && !old_loop->_child)
219 old_loop->_body.yank(x);
220 if (!new_loop->_child)
221 new_loop->_body.push(x); // Collect body info
222 }
223 }
224
225 return phi;
226 }
227
228 //------------------------------dominated_by------------------------------------
229 // Replace the dominated test with an obvious true or false. Place it on the
230 // IGVN worklist for later cleanup. Move control-dependent data Nodes on the
231 // live path up to the dominating control.
dominated_by(Node * prevdom,Node * iff,bool flip,bool exclude_loop_predicate)232 void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exclude_loop_predicate ) {
233 if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }
234
235 // prevdom is the dominating projection of the dominating test.
236 assert( iff->is_If(), "" );
237 assert(iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd || iff->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
238 int pop = prevdom->Opcode();
239 assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
240 if (flip) {
241 if (pop == Op_IfTrue)
242 pop = Op_IfFalse;
243 else
244 pop = Op_IfTrue;
245 }
246 // 'con' is set to true or false to kill the dominated test.
247 Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
248 set_ctrl(con, C->root()); // Constant gets a new use
249 // Hack the dominated test
250 _igvn.replace_input_of(iff, 1, con);
251
252 // If I dont have a reachable TRUE and FALSE path following the IfNode then
253 // I can assume this path reaches an infinite loop. In this case it's not
254 // important to optimize the data Nodes - either the whole compilation will
255 // be tossed or this path (and all data Nodes) will go dead.
256 if (iff->outcnt() != 2) return;
257
258 // Make control-dependent data Nodes on the live path (path that will remain
259 // once the dominated IF is removed) become control-dependent on the
260 // dominating projection.
261 Node* dp = iff->as_If()->proj_out_or_null(pop == Op_IfTrue);
262
263 // Loop predicates may have depending checks which should not
264 // be skipped. For example, range check predicate has two checks
265 // for lower and upper bounds.
266 if (dp == NULL)
267 return;
268
269 ProjNode* dp_proj = dp->as_Proj();
270 ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
271 if (exclude_loop_predicate &&
272 (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL ||
273 unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL ||
274 unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check) != NULL)) {
275 // If this is a range check (IfNode::is_range_check), do not
276 // reorder because Compile::allow_range_check_smearing might have
277 // changed the check.
278 return; // Let IGVN transformation change control dependence.
279 }
280
281 IdealLoopTree* old_loop = get_loop(dp);
282
283 for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
284 Node* cd = dp->fast_out(i); // Control-dependent node
285 // Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
286 if (cd->depends_only_on_test() && _igvn.no_dependent_zero_check(cd)) {
287 assert(cd->in(0) == dp, "");
288 _igvn.replace_input_of(cd, 0, prevdom);
289 set_early_ctrl(cd, false);
290 IdealLoopTree* new_loop = get_loop(get_ctrl(cd));
291 if (old_loop != new_loop) {
292 if (!old_loop->_child) {
293 old_loop->_body.yank(cd);
294 }
295 if (!new_loop->_child) {
296 new_loop->_body.push(cd);
297 }
298 }
299 --i;
300 --imax;
301 }
302 }
303 }
304
305 //------------------------------has_local_phi_input----------------------------
306 // Return TRUE if 'n' has Phi inputs from its local block and no other
307 // block-local inputs (all non-local-phi inputs come from earlier blocks)
has_local_phi_input(Node * n)308 Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
309 Node *n_ctrl = get_ctrl(n);
310 // See if some inputs come from a Phi in this block, or from before
311 // this block.
312 uint i;
313 for( i = 1; i < n->req(); i++ ) {
314 Node *phi = n->in(i);
315 if( phi->is_Phi() && phi->in(0) == n_ctrl )
316 break;
317 }
318 if( i >= n->req() )
319 return NULL; // No Phi inputs; nowhere to clone thru
320
321 // Check for inputs created between 'n' and the Phi input. These
322 // must split as well; they have already been given the chance
323 // (courtesy of a post-order visit) and since they did not we must
324 // recover the 'cost' of splitting them by being very profitable
325 // when splitting 'n'. Since this is unlikely we simply give up.
326 for( i = 1; i < n->req(); i++ ) {
327 Node *m = n->in(i);
328 if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
329 // We allow the special case of AddP's with no local inputs.
330 // This allows us to split-up address expressions.
331 if (m->is_AddP() &&
332 get_ctrl(m->in(2)) != n_ctrl &&
333 get_ctrl(m->in(3)) != n_ctrl) {
334 // Move the AddP up to dominating point
335 Node* c = find_non_split_ctrl(idom(n_ctrl));
336 if (c->is_OuterStripMinedLoop()) {
337 c->as_Loop()->verify_strip_mined(1);
338 c = c->in(LoopNode::EntryControl);
339 }
340 set_ctrl_and_loop(m, c);
341 continue;
342 }
343 return NULL;
344 }
345 assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
346 }
347
348 return n_ctrl;
349 }
350
351 //------------------------------remix_address_expressions----------------------
352 // Rework addressing expressions to get the most loop-invariant stuff
353 // moved out. We'd like to do all associative operators, but it's especially
354 // important (common) to do address expressions.
remix_address_expressions(Node * n)355 Node *PhaseIdealLoop::remix_address_expressions( Node *n ) {
356 if (!has_ctrl(n)) return NULL;
357 Node *n_ctrl = get_ctrl(n);
358 IdealLoopTree *n_loop = get_loop(n_ctrl);
359
360 // See if 'n' mixes loop-varying and loop-invariant inputs and
361 // itself is loop-varying.
362
363 // Only interested in binary ops (and AddP)
364 if( n->req() < 3 || n->req() > 4 ) return NULL;
365
366 Node *n1_ctrl = get_ctrl(n->in( 1));
367 Node *n2_ctrl = get_ctrl(n->in( 2));
368 Node *n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
369 IdealLoopTree *n1_loop = get_loop( n1_ctrl );
370 IdealLoopTree *n2_loop = get_loop( n2_ctrl );
371 IdealLoopTree *n3_loop = get_loop( n3_ctrl );
372
373 // Does one of my inputs spin in a tighter loop than self?
374 if( (n_loop->is_member( n1_loop ) && n_loop != n1_loop) ||
375 (n_loop->is_member( n2_loop ) && n_loop != n2_loop) ||
376 (n_loop->is_member( n3_loop ) && n_loop != n3_loop) )
377 return NULL; // Leave well enough alone
378
379 // Is at least one of my inputs loop-invariant?
380 if( n1_loop == n_loop &&
381 n2_loop == n_loop &&
382 n3_loop == n_loop )
383 return NULL; // No loop-invariant inputs
384
385
386 int n_op = n->Opcode();
387
388 // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
389 if( n_op == Op_LShiftI ) {
390 // Scale is loop invariant
391 Node *scale = n->in(2);
392 Node *scale_ctrl = get_ctrl(scale);
393 IdealLoopTree *scale_loop = get_loop(scale_ctrl );
394 if( n_loop == scale_loop || !scale_loop->is_member( n_loop ) )
395 return NULL;
396 const TypeInt *scale_t = scale->bottom_type()->isa_int();
397 if( scale_t && scale_t->is_con() && scale_t->get_con() >= 16 )
398 return NULL; // Dont bother with byte/short masking
399 // Add must vary with loop (else shift would be loop-invariant)
400 Node *add = n->in(1);
401 Node *add_ctrl = get_ctrl(add);
402 IdealLoopTree *add_loop = get_loop(add_ctrl);
403 //assert( n_loop == add_loop, "" );
404 if( n_loop != add_loop ) return NULL; // happens w/ evil ZKM loops
405
406 // Convert I-V into I+ (0-V); same for V-I
407 if( add->Opcode() == Op_SubI &&
408 _igvn.type( add->in(1) ) != TypeInt::ZERO ) {
409 Node *zero = _igvn.intcon(0);
410 set_ctrl(zero, C->root());
411 Node *neg = new SubINode( _igvn.intcon(0), add->in(2) );
412 register_new_node( neg, get_ctrl(add->in(2) ) );
413 add = new AddINode( add->in(1), neg );
414 register_new_node( add, add_ctrl );
415 }
416 if( add->Opcode() != Op_AddI ) return NULL;
417 // See if one add input is loop invariant
418 Node *add_var = add->in(1);
419 Node *add_var_ctrl = get_ctrl(add_var);
420 IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
421 Node *add_invar = add->in(2);
422 Node *add_invar_ctrl = get_ctrl(add_invar);
423 IdealLoopTree *add_invar_loop = get_loop(add_invar_ctrl );
424 if( add_var_loop == n_loop ) {
425 } else if( add_invar_loop == n_loop ) {
426 // Swap to find the invariant part
427 add_invar = add_var;
428 add_invar_ctrl = add_var_ctrl;
429 add_invar_loop = add_var_loop;
430 add_var = add->in(2);
431 Node *add_var_ctrl = get_ctrl(add_var);
432 IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
433 } else // Else neither input is loop invariant
434 return NULL;
435 if( n_loop == add_invar_loop || !add_invar_loop->is_member( n_loop ) )
436 return NULL; // No invariant part of the add?
437
438 // Yes! Reshape address expression!
439 Node *inv_scale = new LShiftINode( add_invar, scale );
440 Node *inv_scale_ctrl =
441 dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
442 add_invar_ctrl : scale_ctrl;
443 register_new_node( inv_scale, inv_scale_ctrl );
444 Node *var_scale = new LShiftINode( add_var, scale );
445 register_new_node( var_scale, n_ctrl );
446 Node *var_add = new AddINode( var_scale, inv_scale );
447 register_new_node( var_add, n_ctrl );
448 _igvn.replace_node( n, var_add );
449 return var_add;
450 }
451
452 // Replace (I+V) with (V+I)
453 if( n_op == Op_AddI ||
454 n_op == Op_AddL ||
455 n_op == Op_AddF ||
456 n_op == Op_AddD ||
457 n_op == Op_MulI ||
458 n_op == Op_MulL ||
459 n_op == Op_MulF ||
460 n_op == Op_MulD ) {
461 if( n2_loop == n_loop ) {
462 assert( n1_loop != n_loop, "" );
463 n->swap_edges(1, 2);
464 }
465 }
466
467 // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
468 // but not if I2 is a constant.
469 if( n_op == Op_AddP ) {
470 if( n2_loop == n_loop && n3_loop != n_loop ) {
471 if( n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con() ) {
472 Node *n22_ctrl = get_ctrl(n->in(2)->in(2));
473 Node *n23_ctrl = get_ctrl(n->in(2)->in(3));
474 IdealLoopTree *n22loop = get_loop( n22_ctrl );
475 IdealLoopTree *n23_loop = get_loop( n23_ctrl );
476 if( n22loop != n_loop && n22loop->is_member(n_loop) &&
477 n23_loop == n_loop ) {
478 Node *add1 = new AddPNode( n->in(1), n->in(2)->in(2), n->in(3) );
479 // Stuff new AddP in the loop preheader
480 register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
481 Node *add2 = new AddPNode( n->in(1), add1, n->in(2)->in(3) );
482 register_new_node( add2, n_ctrl );
483 _igvn.replace_node( n, add2 );
484 return add2;
485 }
486 }
487 }
488
489 // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
490 if (n2_loop != n_loop && n3_loop == n_loop) {
491 if (n->in(3)->Opcode() == Op_AddX) {
492 Node *V = n->in(3)->in(1);
493 Node *I = n->in(3)->in(2);
494 if (is_member(n_loop,get_ctrl(V))) {
495 } else {
496 Node *tmp = V; V = I; I = tmp;
497 }
498 if (!is_member(n_loop,get_ctrl(I))) {
499 Node *add1 = new AddPNode(n->in(1), n->in(2), I);
500 // Stuff new AddP in the loop preheader
501 register_new_node(add1, n_loop->_head->in(LoopNode::EntryControl));
502 Node *add2 = new AddPNode(n->in(1), add1, V);
503 register_new_node(add2, n_ctrl);
504 _igvn.replace_node(n, add2);
505 return add2;
506 }
507 }
508 }
509 }
510
511 return NULL;
512 }
513
// Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
// Recognizes an AddI of two MulI's of LoadS values inside a valid counted
// int loop and replaces it with a MulAddS2I node, which SuperWord can later
// vectorize with MulAddVS2VI. Requires Matcher support for both rules.
// Returns the new MulAddS2I node, or NULL if the pattern does not match.
Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
  assert(n->Opcode() == Op_AddI, "sanity");
  Node * nn = NULL;
  Node * in1 = n->in(1);
  Node * in2 = n->in(2);
  if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
    IdealLoopTree* loop_n = get_loop(get_ctrl(n));
    if (loop_n->is_counted() &&
        loop_n->_head->as_Loop()->is_valid_counted_loop(T_INT) &&
        Matcher::match_rule_supported(Op_MulAddVS2VI) &&
        Matcher::match_rule_supported(Op_MulAddS2I)) {
      Node* mul_in1 = in1->in(1);
      Node* mul_in2 = in1->in(2);
      Node* mul_in3 = in2->in(1);
      Node* mul_in4 = in2->in(2);
      // All four multiplier operands must be short loads.
      if (mul_in1->Opcode() == Op_LoadS &&
          mul_in2->Opcode() == Op_LoadS &&
          mul_in3->Opcode() == Op_LoadS &&
          mul_in4->Opcode() == Op_LoadS) {
        IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
        IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
        IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
        IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
        IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
        IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
        // All nodes should be in the same counted loop.
        if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
            loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
          Node* adr1 = mul_in1->in(MemNode::Address);
          Node* adr2 = mul_in2->in(MemNode::Address);
          Node* adr3 = mul_in3->in(MemNode::Address);
          Node* adr4 = mul_in4->in(MemNode::Address);
          if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
            // The two products must pair loads from the same two base
            // arrays; the second MulI's operands are swapped if needed
            // so corresponding bases line up.
            if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
                (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
              nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
              register_new_node(nn, get_ctrl(n));
              _igvn.replace_node(n, nn);
              return nn;
            } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
                       (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
              nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
              register_new_node(nn, get_ctrl(n));
              _igvn.replace_node(n, nn);
              return nn;
            }
          }
        }
      }
    }
  }
  return nn;
}
568
569 //------------------------------conditional_move-------------------------------
570 // Attempt to replace a Phi with a conditional move. We have some pretty
571 // strict profitability requirements. All Phis at the merge point must
572 // be converted, so we can remove the control flow. We need to limit the
573 // number of c-moves to a small handful. All code that was in the side-arms
574 // of the CFG diamond is now speculatively executed. This code has to be
575 // "cheap enough". We are pretty much limited to CFG diamonds that merge
576 // 1 or 2 items with a total of 1 or 2 ops executed speculatively.
conditional_move(Node * region)577 Node *PhaseIdealLoop::conditional_move( Node *region ) {
578
579 assert(region->is_Region(), "sanity check");
580 if (region->req() != 3) return NULL;
581
582 // Check for CFG diamond
583 Node *lp = region->in(1);
584 Node *rp = region->in(2);
585 if (!lp || !rp) return NULL;
586 Node *lp_c = lp->in(0);
587 if (lp_c == NULL || lp_c != rp->in(0) || !lp_c->is_If()) return NULL;
588 IfNode *iff = lp_c->as_If();
589
590 // Check for ops pinned in an arm of the diamond.
591 // Can't remove the control flow in this case
592 if (lp->outcnt() > 1) return NULL;
593 if (rp->outcnt() > 1) return NULL;
594
595 IdealLoopTree* r_loop = get_loop(region);
596 assert(r_loop == get_loop(iff), "sanity");
597 // Always convert to CMOVE if all results are used only outside this loop.
598 bool used_inside_loop = (r_loop == _ltree_root);
599
600 // Check profitability
601 int cost = 0;
602 int phis = 0;
603 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
604 Node *out = region->fast_out(i);
605 if (!out->is_Phi()) continue; // Ignore other control edges, etc
606 phis++;
607 PhiNode* phi = out->as_Phi();
608 BasicType bt = phi->type()->basic_type();
609 switch (bt) {
610 case T_DOUBLE:
611 case T_FLOAT:
612 if (C->use_cmove()) {
613 continue; //TODO: maybe we want to add some cost
614 }
615 cost += Matcher::float_cmove_cost(); // Could be very expensive
616 break;
617 case T_LONG: {
618 cost += Matcher::long_cmove_cost(); // May encodes as 2 CMOV's
619 }
620 case T_INT: // These all CMOV fine
621 case T_ADDRESS: { // (RawPtr)
622 cost++;
623 break;
624 }
625 case T_NARROWOOP: // Fall through
626 case T_OBJECT: { // Base oops are OK, but not derived oops
627 const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
628 // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
629 // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
630 // CMOVE'ing a derived pointer requires we also CMOVE the base. If we
631 // have a Phi for the base here that we convert to a CMOVE all is well
632 // and good. But if the base is dead, we'll not make a CMOVE. Later
633 // the allocator will have to produce a base by creating a CMOVE of the
634 // relevant bases. This puts the allocator in the business of
635 // manufacturing expensive instructions, generally a bad plan.
636 // Just Say No to Conditionally-Moved Derived Pointers.
637 if (tp && tp->offset() != 0)
638 return NULL;
639 cost++;
640 break;
641 }
642 default:
643 return NULL; // In particular, can't do memory or I/O
644 }
645 // Add in cost any speculative ops
646 for (uint j = 1; j < region->req(); j++) {
647 Node *proj = region->in(j);
648 Node *inp = phi->in(j);
649 if (get_ctrl(inp) == proj) { // Found local op
650 cost++;
651 // Check for a chain of dependent ops; these will all become
652 // speculative in a CMOV.
653 for (uint k = 1; k < inp->req(); k++)
654 if (get_ctrl(inp->in(k)) == proj)
655 cost += ConditionalMoveLimit; // Too much speculative goo
656 }
657 }
658 // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
659 // This will likely Split-If, a higher-payoff operation.
660 for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
661 Node* use = phi->fast_out(k);
662 if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
663 cost += ConditionalMoveLimit;
664 // Is there a use inside the loop?
665 // Note: check only basic types since CMoveP is pinned.
666 if (!used_inside_loop && is_java_primitive(bt)) {
667 IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
668 if (r_loop == u_loop || r_loop->is_member(u_loop)) {
669 used_inside_loop = true;
670 }
671 }
672 }
673 }//for
674 Node* bol = iff->in(1);
675 if (bol->Opcode() == Op_Opaque4) {
676 return NULL; // Ignore loop predicate checks (the Opaque4 ensures they will go away)
677 }
678 assert(bol->Opcode() == Op_Bool, "Unexpected node");
679 int cmp_op = bol->in(1)->Opcode();
680 if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode
681 return NULL;
682 }
683 // It is expensive to generate flags from a float compare.
684 // Avoid duplicated float compare.
685 if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return NULL;
686
687 float infrequent_prob = PROB_UNLIKELY_MAG(3);
688 // Ignore cost and blocks frequency if CMOVE can be moved outside the loop.
689 if (used_inside_loop) {
690 if (cost >= ConditionalMoveLimit) return NULL; // Too much goo
691
692 // BlockLayoutByFrequency optimization moves infrequent branch
693 // from hot path. No point in CMOV'ing in such case (110 is used
694 // instead of 100 to take into account not exactness of float value).
695 if (BlockLayoutByFrequency) {
696 infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
697 }
698 }
699 // Check for highly predictable branch. No point in CMOV'ing if
700 // we are going to predict accurately all the time.
701 if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
702 //keep going
703 } else if (iff->_prob < infrequent_prob ||
704 iff->_prob > (1.0f - infrequent_prob))
705 return NULL;
706
707 // --------------
708 // Now replace all Phis with CMOV's
709 Node *cmov_ctrl = iff->in(0);
710 uint flip = (lp->Opcode() == Op_IfTrue);
711 Node_List wq;
712 while (1) {
713 PhiNode* phi = NULL;
714 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
715 Node *out = region->fast_out(i);
716 if (out->is_Phi()) {
717 phi = out->as_Phi();
718 break;
719 }
720 }
721 if (phi == NULL) break;
722 if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); }
723 // Move speculative ops
724 wq.push(phi);
725 while (wq.size() > 0) {
726 Node *n = wq.pop();
727 for (uint j = 1; j < n->req(); j++) {
728 Node* m = n->in(j);
729 if (m != NULL && !is_dominator(get_ctrl(m), cmov_ctrl)) {
730 #ifndef PRODUCT
731 if (PrintOpto && VerifyLoopOptimizations) {
732 tty->print(" speculate: ");
733 m->dump();
734 }
735 #endif
736 set_ctrl(m, cmov_ctrl);
737 wq.push(m);
738 }
739 }
740 }
741 Node *cmov = CMoveNode::make(cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
742 register_new_node( cmov, cmov_ctrl );
743 _igvn.replace_node( phi, cmov );
744 #ifndef PRODUCT
745 if (TraceLoopOpts) {
746 tty->print("CMOV ");
747 r_loop->dump_head();
748 if (Verbose) {
749 bol->in(1)->dump(1);
750 cmov->dump(1);
751 }
752 }
753 if (VerifyLoopOptimizations) verify();
754 #endif
755 }
756
757 // The useless CFG diamond will fold up later; see the optimization in
758 // RegionNode::Ideal.
759 _igvn._worklist.push(region);
760
761 return iff->in(1);
762 }
763
enqueue_cfg_uses(Node * m,Unique_Node_List & wq)764 static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
765 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
766 Node* u = m->fast_out(i);
767 if (u->is_CFG()) {
768 if (u->Opcode() == Op_NeverBranch) {
769 u = ((NeverBranchNode*)u)->proj_out(0);
770 enqueue_cfg_uses(u, wq);
771 } else {
772 wq.push(u);
773 }
774 }
775 }
776 }
777
// Try moving a store out of a loop, right before the loop.
// Succeeds only when the store is provably executed exactly once per
// iteration on every path (no early exit before it) and nothing else
// observes the memory state it defines; the store is then hoisted to the
// loop entry. Returns the (rewired) store on success, NULL otherwise.
Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
  // Store has to be first in the loop body
  IdealLoopTree *n_loop = get_loop(n_ctrl);
  if (n->is_Store() && n_loop != _ltree_root &&
      n_loop->is_loop() && n_loop->_head->is_Loop() &&
      n->in(0) != NULL) {
    Node* address = n->in(MemNode::Address);
    Node* value = n->in(MemNode::ValueIn);
    Node* mem = n->in(MemNode::Memory);
    IdealLoopTree* address_loop = get_loop(get_ctrl(address));
    IdealLoopTree* value_loop = get_loop(get_ctrl(value));

    // - address and value must be loop invariant
    // - memory must be a memory Phi for the loop
    // - Store must be the only store on this memory slice in the
    // loop: if there's another store following this one then value
    // written at iteration i by the second store could be overwritten
    // at iteration i+n by the first store: it's not safe to move the
    // first store out of the loop
    // - nothing must observe the memory Phi: it guarantees no read
    // before the store, we are also guaranteed the store post
    // dominates the loop head (ignoring a possible early
    // exit). Otherwise there would be extra Phi involved between the
    // loop's Phi and the store.
    // - there must be no early exit from the loop before the Store
    // (such an exit most of the time would be an extra use of the
    // memory Phi but sometimes is a bottom memory Phi that takes the
    // store as input).

    if (!n_loop->is_member(address_loop) &&
        !n_loop->is_member(value_loop) &&
        mem->is_Phi() && mem->in(0) == n_loop->_head &&
        mem->outcnt() == 1 &&
        mem->in(LoopNode::LoopBackControl) == n) {

      assert(n_loop->_tail != NULL, "need a tail");
      assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");

      // Verify that there's no early exit of the loop before the store.
      bool ctrl_ok = false;
      {
        // Follow control from loop head until n, we exit the loop or
        // we reach the tail
        ResourceMark rm;
        Unique_Node_List wq;
        wq.push(n_loop->_head);

        for (uint next = 0; next < wq.size(); ++next) {
          Node *m = wq.at(next);
          if (m == n->in(0)) {
            // Reached the store's control: this path is fine.
            ctrl_ok = true;
            continue;
          }
          assert(!has_ctrl(m), "should be CFG");
          if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
            // Left the loop (early exit) or hit the backedge before
            // reaching the store: hoisting is unsafe.
            ctrl_ok = false;
            break;
          }
          enqueue_cfg_uses(m, wq);
          if (wq.size() > 10) {
            // Give up on complicated control flow rather than keep walking.
            ctrl_ok = false;
            break;
          }
        }
      }
      if (ctrl_ok) {
        // move the Store
        _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
        _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
        _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
        // Disconnect the phi now. An empty phi can confuse other
        // optimizations in this pass of loop opts.
        _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
        n_loop->_body.yank(mem);

        set_ctrl_and_loop(n, n->in(0));

        return n;
      }
    }
  }
  return NULL;
}
862
// Try moving a store out of a loop, right after the loop
//
// Sinks a store with a loop-invariant address below the loop when it is the
// last effect on its memory slice and nothing in the loop observes the stored
// value (only Phis/other sunk-able stores consume it). The store's control is
// moved to the late control (LCA of all non-phi users) outside the loop.
void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
  if (n->is_Store() && n->in(0) != NULL) {
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    // Store must be in a loop
    if (n_loop != _ltree_root && !n_loop->_irreducible) {
      Node* address = n->in(MemNode::Address);
      Node* value = n->in(MemNode::ValueIn);
      IdealLoopTree* address_loop = get_loop(get_ctrl(address));
      // address must be loop invariant
      if (!n_loop->is_member(address_loop)) {
        // Store must be last on this memory slice in the loop and
        // nothing in the loop must observe it
        Node* phi = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node* u = n->fast_out(i);
          if (has_ctrl(u)) { // control use?
            IdealLoopTree *u_loop = get_loop(get_ctrl(u));
            if (!n_loop->is_member(u_loop)) {
              // Uses outside the loop don't block sinking.
              continue;
            }
            if (u->is_Phi() && u->in(0) == n_loop->_head) {
              assert(_igvn.type(u) == Type::MEMORY, "bad phi");
              // multiple phis on the same slice are possible
              if (phi != NULL) {
                return;
              }
              phi = u;
              continue;
            }
          }
          // Any other in-loop use observes the store: bail out.
          return;
        }
        if (phi != NULL) {
          // Nothing in the loop before the store (next iteration)
          // must observe the stored value
          bool mem_ok = true;
          {
            ResourceMark rm;
            Unique_Node_List wq;
            wq.push(phi);
            for (uint next = 0; next < wq.size() && mem_ok; ++next) {
              Node *m = wq.at(next);
              for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
                Node* u = m->fast_out(i);
                if (u->is_Store() || u->is_Phi()) {
                  if (u != n) {
                    wq.push(u);
                    // Cap the walk: give up on complex memory graphs.
                    mem_ok = (wq.size() <= 10);
                  }
                } else {
                  mem_ok = false;
                  break;
                }
              }
            }
          }
          if (mem_ok) {
            // Move the store out of the loop if the LCA of all
            // users (except for the phi) is outside the loop.
            Node* hook = new Node(1);
            hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
            // Temporarily swap the store for the hook in the phi so
            // get_late_ctrl() below ignores the phi use.
            _igvn.rehash_node_delayed(phi);
            int count = phi->replace_edge(n, hook);
            assert(count > 0, "inconsistent phi");

            // Compute latest point this store can go
            Node* lca = get_late_ctrl(n, get_ctrl(n));
            if (lca->is_OuterStripMinedLoop()) {
              lca = lca->in(LoopNode::EntryControl);
            }
            if (n_loop->is_member(get_loop(lca))) {
              // LCA is in the loop - bail out
              _igvn.replace_node(hook, n);
              return;
            }
#ifdef ASSERT
            if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
              assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
              n_loop->_head->as_Loop()->verify_strip_mined(1);
              Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
              IdealLoopTree* outer_loop = get_loop(outer);
              assert(n_loop->_parent == outer_loop, "broken loop tree");
              assert(get_loop(lca) == outer_loop, "safepoint in outer loop consume all memory state");
            }
#endif

            // Move store out of the loop
            _igvn.replace_node(hook, n->in(MemNode::Memory));
            _igvn.replace_input_of(n, 0, lca);
            set_ctrl_and_loop(n, lca);

            // Disconnect the phi now. An empty phi can confuse other
            // optimizations in this pass of loop opts..
            if (phi->in(LoopNode::LoopBackControl) == phi) {
              _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
              n_loop->_body.yank(phi);
            }
          }
        }
      }
    }
  }
}
968
//------------------------------split_if_with_blocks_pre-----------------------
// Do the real work in a non-recursive function.  Data nodes want to be
// cloned in the pre-order so they can feed each other nicely.
//
// Returns 'n' itself when nothing is done, or the replacement node (a CMove,
// a remixed address expression, a dominating cast, or the Phi that 'n' was
// split through) when a transformation fired.
Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
  // Cloning these guys is unlikely to win
  int n_op = n->Opcode();
  if (n_op == Op_MergeMem) {
    return n;
  }
  if (n->is_Proj()) {
    return n;
  }
  // Do not clone-up CmpFXXX variations, as these are always
  // followed by a CmpI
  if (n->is_Cmp()) {
    return n;
  }
  // Attempt to use a conditional move instead of a phi/branch
  if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
    Node *cmov = conditional_move( n );
    if (cmov) {
      return cmov;
    }
  }
  if (n->is_CFG() || n->is_LoadStore()) {
    return n;
  }
  if (n->is_Opaque1() ||     // Opaque nodes cannot be mod'd
      n_op == Op_Opaque2) {
    if (!C->major_progress()) {   // If chance of no more loop opts...
      _igvn._worklist.push(n);  // maybe we'll remove them
    }
    return n;
  }

  if (n->is_Con()) {
    return n;   // No cloning for Con nodes
  }

  Node *n_ctrl = get_ctrl(n);
  if (!n_ctrl) {
    return n;  // Dead node
  }

  Node* res = try_move_store_before_loop(n, n_ctrl);
  if (res != NULL) {
    return n;
  }

  // Attempt to remix address expressions for loop invariants
  Node *m = remix_address_expressions( n );
  if( m ) return m;

  if (n_op == Op_AddI) {
    // AddI of a loop-invariant and a phi may become a mul-add.
    Node *nn = convert_add_to_muladd( n );
    if ( nn ) return nn;
  }

  if (n->is_ConstraintCast()) {
    Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
    // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
    // Node control inputs don't necessarily agree with loop control info (due to
    // transformations happened in between), thus additional dominance check is needed
    // to keep loop info valid.
    if (dom_cast != NULL && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
      _igvn.replace_node(n, dom_cast);
      return dom_cast;
    }
  }

  // Determine if the Node has inputs from some local Phi.
  // Returns the block to clone thru.
  Node *n_blk = has_local_phi_input( n );
  if( !n_blk ) return n;

  // Do not clone the trip counter through on a CountedLoop
  // (messes up the canonical shape).
  if (((n_blk->is_CountedLoop() || (n_blk->is_Loop() && n_blk->as_Loop()->is_transformed_long_loop())) && n->Opcode() == Op_AddI) ||
      (n_blk->is_LongCountedLoop() && n->Opcode() == Op_AddL)) {
    return n;
  }

  // Check for having no control input; not pinned.  Allow
  // dominating control.
  if (n->in(0)) {
    Node *dom = idom(n_blk);
    if (dom_lca(n->in(0), dom) != n->in(0)) {
      return n;
    }
  }
  // Policy: when is it profitable.  You must get more wins than
  // policy before it is considered profitable.  Policy is usually 0,
  // so 1 win is considered profitable.  Big merges will require big
  // cloning, so get a larger policy.
  int policy = n_blk->req() >> 2;

  // If the loop is a candidate for range check elimination,
  // delay splitting through it's phi until a later loop optimization
  if (n_blk->is_CountedLoop()) {
    IdealLoopTree *lp = get_loop(n_blk);
    if (lp && lp->_rce_candidate) {
      return n;
    }
  }

  if (must_throttle_split_if()) return n;

  // Split 'n' through the merge point if it is profitable
  Node *phi = split_thru_phi( n, n_blk, policy );
  if (!phi) return n;

  // Found a Phi to split thru!
  // Replace 'n' with the new phi
  _igvn.replace_node( n, phi );
  // Moved a load around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->is_Load() &&
      !phi->in(LoopNode::LoopBackControl)->is_Load())
    C->set_major_progress();

  return phi;
}
1090
merge_point_too_heavy(Compile * C,Node * region)1091 static bool merge_point_too_heavy(Compile* C, Node* region) {
1092 // Bail out if the region and its phis have too many users.
1093 int weight = 0;
1094 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1095 weight += region->fast_out(i)->outcnt();
1096 }
1097 int nodes_left = C->max_node_limit() - C->live_nodes();
1098 if (weight * 8 > nodes_left) {
1099 if (PrintOpto) {
1100 tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
1101 }
1102 return true;
1103 } else {
1104 return false;
1105 }
1106 }
1107
merge_point_safe(Node * region)1108 static bool merge_point_safe(Node* region) {
1109 // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1110 // having a PhiNode input. This sidesteps the dangerous case where the split
1111 // ConvI2LNode may become TOP if the input Value() does not
1112 // overlap the ConvI2L range, leaving a node which may not dominate its
1113 // uses.
1114 // A better fix for this problem can be found in the BugTraq entry, but
1115 // expediency for Mantis demands this hack.
1116 #ifdef _LP64
1117 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1118 Node* n = region->fast_out(i);
1119 if (n->is_Phi()) {
1120 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1121 Node* m = n->fast_out(j);
1122 if (m->Opcode() == Op_ConvI2L)
1123 return false;
1124 if (m->is_CastII()) {
1125 return false;
1126 }
1127 }
1128 }
1129 }
1130 #endif
1131 return true;
1132 }
1133
1134
1135 //------------------------------place_near_use---------------------------------
1136 // Place some computation next to use but not inside inner loops.
1137 // For inner loop uses move it to the preheader area.
place_near_use(Node * useblock) const1138 Node *PhaseIdealLoop::place_near_use(Node *useblock) const {
1139 IdealLoopTree *u_loop = get_loop( useblock );
1140 if (u_loop->_irreducible) {
1141 return useblock;
1142 }
1143 if (u_loop->_child) {
1144 if (useblock == u_loop->_head && u_loop->_head->is_OuterStripMinedLoop()) {
1145 return u_loop->_head->in(LoopNode::EntryControl);
1146 }
1147 return useblock;
1148 }
1149 return u_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
1150 }
1151
1152
identical_backtoback_ifs(Node * n)1153 bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
1154 if (!n->is_If() || n->is_CountedLoopEnd()) {
1155 return false;
1156 }
1157 if (!n->in(0)->is_Region()) {
1158 return false;
1159 }
1160 Node* region = n->in(0);
1161 Node* dom = idom(region);
1162 if (!dom->is_If() || dom->in(1) != n->in(1)) {
1163 return false;
1164 }
1165 IfNode* dom_if = dom->as_If();
1166 Node* proj_true = dom_if->proj_out(1);
1167 Node* proj_false = dom_if->proj_out(0);
1168
1169 for (uint i = 1; i < region->req(); i++) {
1170 if (is_dominator(proj_true, region->in(i))) {
1171 continue;
1172 }
1173 if (is_dominator(proj_false, region->in(i))) {
1174 continue;
1175 }
1176 return false;
1177 }
1178
1179 return true;
1180 }
1181
1182
can_split_if(Node * n_ctrl)1183 bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
1184 if (must_throttle_split_if()) {
1185 return false;
1186 }
1187
1188 // Do not do 'split-if' if irreducible loops are present.
1189 if (_has_irreducible_loops) {
1190 return false;
1191 }
1192
1193 if (merge_point_too_heavy(C, n_ctrl)) {
1194 return false;
1195 }
1196
1197 // Do not do 'split-if' if some paths are dead. First do dead code
1198 // elimination and then see if its still profitable.
1199 for (uint i = 1; i < n_ctrl->req(); i++) {
1200 if (n_ctrl->in(i) == C->top()) {
1201 return false;
1202 }
1203 }
1204
1205 // If trying to do a 'Split-If' at the loop head, it is only
1206 // profitable if the cmp folds up on BOTH paths. Otherwise we
1207 // risk peeling a loop forever.
1208
1209 // CNC - Disabled for now. Requires careful handling of loop
1210 // body selection for the cloned code. Also, make sure we check
1211 // for any input path not being in the same loop as n_ctrl. For
1212 // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
1213 // because the alternative loop entry points won't be converted
1214 // into LoopNodes.
1215 IdealLoopTree *n_loop = get_loop(n_ctrl);
1216 for (uint j = 1; j < n_ctrl->req(); j++) {
1217 if (get_loop(n_ctrl->in(j)) != n_loop) {
1218 return false;
1219 }
1220 }
1221
1222 // Check for safety of the merge point.
1223 if (!merge_point_safe(n_ctrl)) {
1224 return false;
1225 }
1226
1227 return true;
1228 }
1229
1230 // Detect if the node is the inner strip-mined loop
1231 // Return: NULL if it's not the case, or the exit of outer strip-mined loop
is_inner_of_stripmined_loop(const Node * out)1232 static Node* is_inner_of_stripmined_loop(const Node* out) {
1233 Node* out_le = NULL;
1234
1235 if (out->is_CountedLoopEnd()) {
1236 const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1237
1238 if (loop != NULL && loop->is_strip_mined()) {
1239 out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1240 }
1241 }
1242
1243 return out_le;
1244 }
1245
//------------------------------split_if_with_blocks_post----------------------
// Do the real work in a non-recursive function.  CFG hackery wants to be
// in the post-order, so it can dirty the I-DOM info and not use the dirtied
// info.
//
// Performs, in order of applicability: the classic split-if on a Cmp feeding
// a Bool feeding an If/CMove, the merge of identical back-to-back ifs,
// elimination of an If dominated by an identical test, per-use cloning of
// shared loop-varying computations with no loop-varying uses, sinking of
// stores after the loop, and removal of stale Opaque2 nodes.
void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {

  // Cloning Cmp through Phi's involves the split-if transform.
  // FastLock is not used by an If
  if (n->is_Cmp() && !n->is_FastLock()) {
    Node *n_ctrl = get_ctrl(n);
    // Determine if the Node has inputs from some local Phi.
    // Returns the block to clone thru.
    Node *n_blk = has_local_phi_input(n);
    if (n_blk != n_ctrl) {
      return;
    }

    if (!can_split_if(n_ctrl)) {
      return;
    }

    if (n->outcnt() != 1) {
      return; // Multiple bool's from 1 compare?
    }
    Node *bol = n->unique_out();
    assert(bol->is_Bool(), "expect a bool here");
    if (bol->outcnt() != 1) {
      return;// Multiple branches from 1 compare?
    }
    Node *iff = bol->unique_out();

    // Check some safety conditions
    if (iff->is_If()) {        // Classic split-if?
      if (iff->in(0) != n_ctrl) {
        return; // Compare must be in same blk as if
      }
    } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
      // Can't split CMove with different control edge.
      if (iff->in(0) != NULL && iff->in(0) != n_ctrl ) {
        return;
      }
      if (get_ctrl(iff->in(2)) == n_ctrl ||
          get_ctrl(iff->in(3)) == n_ctrl) {
        return;                 // Inputs not yet split-up
      }
      if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
        return;                 // Loop-invar test gates loop-varying CMOVE
      }
    } else {
      return;  // some other kind of node, such as an Allocate
    }

    // When is split-if profitable?  Every 'win' on means some control flow
    // goes dead, so it's almost always a win.
    int policy = 0;
    // Split compare 'n' through the merge point if it is profitable
    Node *phi = split_thru_phi( n, n_ctrl, policy);
    if (!phi) {
      return;
    }

    // Found a Phi to split thru!
    // Replace 'n' with the new phi
    _igvn.replace_node(n, phi);

    // Now split the bool up thru the phi
    // (policy -1: always split, regardless of profitability)
    Node *bolphi = split_thru_phi(bol, n_ctrl, -1);
    guarantee(bolphi != NULL, "null boolean phi node");

    _igvn.replace_node(bol, bolphi);
    assert(iff->in(1) == bolphi, "");

    if (bolphi->Value(&_igvn)->singleton()) {
      // Condition folded to a constant; IGVN will clean up the If.
      return;
    }

    // Conditional-move?  Must split up now
    if (!iff->is_If()) {
      Node *cmovphi = split_thru_phi(iff, n_ctrl, -1);
      _igvn.replace_node(iff, cmovphi);
      return;
    }

    // Now split the IF
    do_split_if(iff);
    return;
  }

  // Two identical ifs back to back can be merged: feed the second If a Phi
  // of constants recording which side of the dominating test each path took,
  // then split it.
  if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
    Node *n_ctrl = n->in(0);
    PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
    IfNode* dom_if = idom(n_ctrl)->as_If();
    Node* proj_true = dom_if->proj_out(1);
    Node* proj_false = dom_if->proj_out(0);
    Node* con_true = _igvn.makecon(TypeInt::ONE);
    Node* con_false = _igvn.makecon(TypeInt::ZERO);

    for (uint i = 1; i < n_ctrl->req(); i++) {
      if (is_dominator(proj_true, n_ctrl->in(i))) {
        bolphi->init_req(i, con_true);
      } else {
        assert(is_dominator(proj_false, n_ctrl->in(i)), "bad if");
        bolphi->init_req(i, con_false);
      }
    }
    register_new_node(bolphi, n_ctrl);
    _igvn.replace_input_of(n, 1, bolphi);

    // Now split the IF
    do_split_if(n);
    return;
  }

  // Check for an IF ready to split; one that has its
  // condition codes input coming from a Phi at the block start.
  int n_op = n->Opcode();

  // Check for an IF being dominated by another IF same test
  if (n_op == Op_If ||
      n_op == Op_RangeCheck) {
    Node *bol = n->in(1);
    uint max = bol->outcnt();
    // Check for same test used more than once?
    if (max > 1 && bol->is_Bool()) {
      // Search up IDOMs to see if this IF is dominated.
      Node *cutoff = get_ctrl(bol);

      // Now search up IDOMs till cutoff, looking for a dominating test
      Node *prevdom = n;
      Node *dom = idom(prevdom);
      while (dom != cutoff) {
        if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
          // It's invalid to move control dependent data nodes in the inner
          // strip-mined loop, because:
          // 1) break validation of LoopNode::verify_strip_mined()
          // 2) move code with side-effect in strip-mined loop
          // Move to the exit of outer strip-mined loop in that case.
          Node* out_le = is_inner_of_stripmined_loop(dom);
          if (out_le != NULL) {
            prevdom = out_le;
          }
          // Replace the dominated test with an obvious true or false.
          // Place it on the IGVN worklist for later cleanup.
          C->set_major_progress();
          dominated_by(prevdom, n, false, true);
#ifndef PRODUCT
          if( VerifyLoopOptimizations ) verify();
#endif
          return;
        }
        prevdom = dom;
        dom = idom(prevdom);
      }
    }
  }

  // See if a shared loop-varying computation has no loop-varying uses.
  // Happens if something is only used for JVM state in uncommon trap exits,
  // like various versions of induction variable+offset.  Clone the
  // computation per usage to allow it to sink out of the loop.
  if (has_ctrl(n) && !n->in(0)) {// n not dead and has no control edge (can float about)
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    if( n_loop != _ltree_root ) {
      DUIterator_Fast imax, i = n->fast_outs(imax);
      for (; i < imax; i++) {
        Node* u = n->fast_out(i);
        if( !has_ctrl(u) ) break; // Found control user
        IdealLoopTree *u_loop = get_loop(get_ctrl(u));
        if( u_loop == n_loop ) break; // Found loop-varying use
        if( n_loop->is_member( u_loop ) ) break; // Found use in inner loop
        if( u->Opcode() == Op_Opaque1 ) break; // Found loop limit, bugfix for 4677003
      }
      bool did_break = (i < imax);  // Did we break out of the previous loop?
      if (!did_break && n->outcnt() > 1) { // All uses in outer loops!
        Node *late_load_ctrl = NULL;
        if (n->is_Load()) {
          // If n is a load, get and save the result from get_late_ctrl(),
          // to be later used in calculating the control for n's clones.
          clear_dom_lca_tags();
          late_load_ctrl = get_late_ctrl(n, n_ctrl);
        }
        // If n is a load, and the late control is the same as the current
        // control, then the cloning of n is a pointless exercise, because
        // GVN will ensure that we end up where we started.
        if (!n->is_Load() || (late_load_ctrl != n_ctrl && is_safe_load_ctrl(late_load_ctrl))) {
          for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
            Node *u = n->last_out(j); // Clone private computation per use
            _igvn.rehash_node_delayed(u);
            Node *x = n->clone(); // Clone computation
            Node *x_ctrl = NULL;
            if( u->is_Phi() ) {
              // Replace all uses of normal nodes.  Replace Phi uses
              // individually, so the separate Nodes can sink down
              // different paths.
              uint k = 1;
              while( u->in(k) != n ) k++;
              u->set_req( k, x );
              // x goes next to Phi input path
              x_ctrl = u->in(0)->in(k);
              --j;
            } else {              // Normal use
              // Replace all uses
              for( uint k = 0; k < u->req(); k++ ) {
                if( u->in(k) == n ) {
                  u->set_req( k, x );
                  --j;
                }
              }
              x_ctrl = get_ctrl(u);
            }

            // Find control for 'x' next to use but not inside inner loops.
            // For inner loop uses get the preheader area.
            x_ctrl = place_near_use(x_ctrl);

            if (n->is_Load()) {
              // For loads, add a control edge to a CFG node outside of the loop
              // to force them to not combine and return back inside the loop
              // during GVN optimization (4641526).
              //
              // Because we are setting the actual control input, factor in
              // the result from get_late_ctrl() so we respect any
              // anti-dependences. (6233005).
              x_ctrl = dom_lca(late_load_ctrl, x_ctrl);

              // Don't allow the control input to be a CFG splitting node.
              // Such nodes should only have ProjNodes as outs, e.g. IfNode
              // should only have IfTrueNode and IfFalseNode (4985384).
              x_ctrl = find_non_split_ctrl(x_ctrl);

              IdealLoopTree* x_loop = get_loop(x_ctrl);
              Node* x_head = x_loop->_head;
              if (x_head->is_Loop() && (x_head->is_OuterStripMinedLoop() || x_head->as_Loop()->is_strip_mined()) && is_dominator(n_ctrl, x_head)) {
                // Anti dependence analysis is sometimes too
                // conservative: a store in the outer strip mined loop
                // can prevent a load from floating out of the outer
                // strip mined loop but the load may not be referenced
                // from the safepoint: loop strip mining verification
                // code reports a problem in that case. Make sure the
                // load is not moved in the outer strip mined loop in
                // that case.
                x_ctrl = x_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
              }
              assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");

              x->set_req(0, x_ctrl);
            }
            register_new_node(x, x_ctrl);

            // Some institutional knowledge is needed here: 'x' is
            // yanked because if the optimizer runs GVN on it all the
            // cloned x's will common up and undo this optimization and
            // be forced back in the loop.
            // I tried setting control edges on the x's to force them to
            // not combine, but the matching gets worried when it tries
            // to fold a StoreP and an AddP together (as part of an
            // address expression) and the AddP and StoreP have
            // different controls.
            BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
            if (!x->is_Load() && !x->is_DecodeNarrowPtr() && !x->is_AddP() && !bs->is_gc_barrier_node(x)) {
              _igvn._worklist.yank(x);
            }
          }
          _igvn.remove_dead_node(n);
        }
      }
    }
  }

  try_move_store_after_loop(n);

  // Check for Opaque2's who's loop has disappeared - who's input is in the
  // same loop nest as their output.  Remove 'em, they are no longer useful.
  if( n_op == Op_Opaque2 &&
      n->in(1) != NULL &&
      get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
    _igvn.replace_node( n, n->in(1) );
  }
}
1527
is_safe_load_ctrl(Node * ctrl)1528 bool PhaseIdealLoop::is_safe_load_ctrl(Node* ctrl) {
1529 if (ctrl->is_Proj() && ctrl->in(0)->is_Call() && ctrl->has_out_with(Op_Catch)) {
1530 return false;
1531 }
1532 return true;
1533 }
1534
//------------------------------split_if_with_blocks---------------------------
// Check for aggressive application of 'split-if' optimization,
// using basic block level info.
//
// Iterative (stack-based) DFS over the graph from the root: each node gets
// split_if_with_blocks_pre() on first visit and split_if_with_blocks_post()
// once all of its uses have been processed. 'visited' and 'nstack' are
// caller-supplied scratch structures.
void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) {
  Node* root = C->root();
  visited.set(root->_idx); // first, mark root as visited
  // Do pre-visit work for root
  Node* n = split_if_with_blocks_pre(root);
  uint cnt = n->outcnt();
  uint i = 0;

  while (true) {
    // Visit all children
    if (i < cnt) {
      Node* use = n->raw_out(i);
      ++i;
      if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
        // Now do pre-visit work for this use
        use = split_if_with_blocks_pre(use);
        nstack.push(n, i); // Save parent and next use's index.
        n = use; // Process all children of current use.
        cnt = use->outcnt();
        i = 0;
      }
    }
    else {
      // All of n's children have been processed, complete post-processing.
      if (cnt != 0 && !n->is_Con()) {
        assert(has_node(n), "no dead nodes");
        split_if_with_blocks_post(n);
      }
      if (must_throttle_split_if()) {
        // Node budget exhausted: abandon the rest of the traversal.
        nstack.clear();
      }
      if (nstack.is_empty()) {
        // Finished all nodes on stack.
        break;
      }
      // Get saved parent node and next use's index. Visit the rest of uses.
      n = nstack.node();
      cnt = n->outcnt();
      i = nstack.index();
      nstack.pop();
    }
  }
}
1581
1582
1583 //=============================================================================
1584 //
1585 // C L O N E A L O O P B O D Y
1586 //
1587
//------------------------------clone_iff--------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
// through the Phi recursively, and return a Bool.
//
// Strategy: build two Phis merging the left and right Cmp operands, clone the
// sample Cmp/Bool (and an optional Opaque4 wrapper) on top of them. Returns
// the new Bool, or the new Opaque4 when the inputs were Opaque4-wrapped.
Node* PhaseIdealLoop::clone_iff(PhiNode *phi, IdealLoopTree *loop) {

  // Convert this Phi into a Phi merging Bools
  uint i;
  for (i = 1; i < phi->req(); i++) {
    Node *b = phi->in(i);
    if (b->is_Phi()) {
      // Recurse: replace nested Phis of Bools by a single Bool first.
      _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi(), loop));
    } else {
      assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
    }
  }

  // Use input 1 as the template; all inputs share the same shape.
  Node* n = phi->in(1);
  Node* sample_opaque = NULL;
  Node *sample_bool = NULL;
  if (n->Opcode() == Op_Opaque4) {
    sample_opaque = n;
    sample_bool = n->in(1);
    assert(sample_bool->is_Bool(), "wrong type");
  } else {
    sample_bool = n;
  }
  Node *sample_cmp = sample_bool->in(1);

  // Make Phis to merge the Cmp's inputs.
  PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
  PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
  for (i = 1; i < phi->req(); i++) {
    // Dig the Cmp operands out of each input (one level deeper when
    // wrapped in Opaque4).
    Node *n1 = sample_opaque == NULL ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
    Node *n2 = sample_opaque == NULL ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
    phi1->set_req(i, n1);
    phi2->set_req(i, n2);
    phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
  }
  // See if these Phis have been made before.
  // Register with optimizer
  Node *hit1 = _igvn.hash_find_insert(phi1);
  if (hit1) {                          // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1);     // Remove new phi
    assert(hit1->is_Phi(), "" );
    phi1 = (PhiNode*)hit1;            // Use existing phi
  } else {                             // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node *hit2 = _igvn.hash_find_insert(phi2);
  if (hit2) {                          // Hit, toss just made Phi
    _igvn.remove_dead_node(phi2);     // Remove new phi
    assert(hit2->is_Phi(), "" );
    phi2 = (PhiNode*)hit2;            // Use existing phi
  } else {                             // Miss
    _igvn.register_new_node_with_optimizer(phi2);
  }
  // Register Phis with loop/block info
  set_ctrl(phi1, phi->in(0));
  set_ctrl(phi2, phi->in(0));
  // Make a new Cmp
  Node *cmp = sample_cmp->clone();
  cmp->set_req(1, phi1);
  cmp->set_req(2, phi2);
  _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, phi->in(0));

  // Make a new Bool
  Node *b = sample_bool->clone();
  b->set_req(1,cmp);
  _igvn.register_new_node_with_optimizer(b);
  set_ctrl(b, phi->in(0));

  if (sample_opaque != NULL) {
    // Re-wrap the result in an Opaque4, matching the inputs' shape.
    Node* opaque = sample_opaque->clone();
    opaque->set_req(1, b);
    _igvn.register_new_node_with_optimizer(opaque);
    set_ctrl(opaque, phi->in(0));
    return opaque;
  }

  assert(b->is_Bool(), "");
  return b;
}
1674
//------------------------------clone_bool-------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
// through the Phi recursively, and return a Bool.
//
// Like clone_iff() but for Phis whose inputs are Cmps (or TOP): builds two
// operand-merging Phis and a single Cmp on top. Returns the new Cmp.
CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
  uint i;
  // Convert this Phi into a Phi merging Bools
  for( i = 1; i < phi->req(); i++ ) {
    Node *b = phi->in(i);
    if( b->is_Phi() ) {
      // Recurse on nested Phis first so every input ends up a Cmp/TOP.
      _igvn.replace_input_of(phi, i, clone_bool( b->as_Phi(), loop ));
    } else {
      assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
    }
  }

  // Use input 1 as the template Cmp to clone.
  Node *sample_cmp = phi->in(1);

  // Make Phis to merge the Cmp's inputs.
  PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
  PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
  for( uint j = 1; j < phi->req(); j++ ) {
    Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
    Node *n1, *n2;
    if( cmp_top->is_Cmp() ) {
      n1 = cmp_top->in(1);
      n2 = cmp_top->in(2);
    } else {
      // TOP input: propagate TOP into both operand Phis.
      n1 = n2 = cmp_top;
    }
    phi1->set_req( j, n1 );
    phi2->set_req( j, n2 );
    phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
  }

  // See if these Phis have been made before.
  // Register with optimizer
  Node *hit1 = _igvn.hash_find_insert(phi1);
  if( hit1 ) {                          // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1);      // Remove new phi
    assert( hit1->is_Phi(), "" );
    phi1 = (PhiNode*)hit1;             // Use existing phi
  } else {                              // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node *hit2 = _igvn.hash_find_insert(phi2);
  if( hit2 ) {                          // Hit, toss just made Phi
    _igvn.remove_dead_node(phi2);      // Remove new phi
    assert( hit2->is_Phi(), "" );
    phi2 = (PhiNode*)hit2;             // Use existing phi
  } else {                              // Miss
    _igvn.register_new_node_with_optimizer(phi2);
  }
  // Register Phis with loop/block info
  set_ctrl(phi1, phi->in(0));
  set_ctrl(phi2, phi->in(0));
  // Make a new Cmp
  Node *cmp = sample_cmp->clone();
  cmp->set_req( 1, phi1 );
  cmp->set_req( 2, phi2 );
  _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, phi->in(0));

  assert( cmp->is_Cmp(), "" );
  return (CmpNode*)cmp;
}
1743
1744 //------------------------------sink_use---------------------------------------
1745 // If 'use' was in the loop-exit block, it now needs to be sunk
1746 // below the post-loop merge point.
sink_use(Node * use,Node * post_loop)1747 void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
1748 if (!use->is_CFG() && get_ctrl(use) == post_loop->in(2)) {
1749 set_ctrl(use, post_loop);
1750 for (DUIterator j = use->outs(); use->has_out(j); j++)
1751 sink_use(use->out(j), post_loop);
1752 }
1753 }
1754
// Fix up uses of 'old' that are outside both the original loop and (for
// strip mined loops) the outer loop.  Each such use must now see a Phi at
// the loop-exit merge Region that merges the old-loop and new-loop values.
// Uses that are Ifs/CMoves/Opaques, Bools, or CreateEx cannot take a Phi
// directly and are queued on the lazily-allocated split_* worklists for the
// caller to clone later.
void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
                                                 IdealLoopTree* loop, IdealLoopTree* outer_loop,
                                                 Node_List*& split_if_set, Node_List*& split_bool_set,
                                                 Node_List*& split_cex_set, Node_List& worklist,
                                                 uint new_counter, CloneLoopMode mode) {
  Node* nnn = old_new[old->_idx];
  // Copy uses to a worklist, so I can munge the def-use info
  // with impunity.
  for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
    worklist.push(old->fast_out(j));

  while( worklist.size() ) {
    Node *use = worklist.pop();
    if (!has_node(use))  continue; // Ignore dead nodes
    if (use->in(0) == C->top())  continue;
    IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
    // Check for data-use outside of loop - at least one of OLD or USE
    // must not be a CFG node.
#ifdef ASSERT
    if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == NULL) {
      Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
      assert(mode != IgnoreStripMined, "incorrect cloning mode");
      assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node");
    }
#endif
    if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {

      // If the Data use is an IF, that means we have an IF outside of the
      // loop that is switching on a condition that is set inside of the
      // loop.  Happens if people set a loop-exit flag; then test the flag
      // in the loop to break the loop, then test is again outside of the
      // loop to determine which way the loop exited.
      // Loop predicate If node connects to Bool node through Opaque1 node.
      if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use) || use->Opcode() == Op_Opaque4) {
        // Since this code is highly unlikely, we lazily build the worklist
        // of such Nodes to go split.
        if (!split_if_set) {
          split_if_set = new Node_List();
        }
        split_if_set->push(use);
      }
      if (use->is_Bool()) {
        if (!split_bool_set) {
          split_bool_set = new Node_List();
        }
        split_bool_set->push(use);
      }
      if (use->Opcode() == Op_CreateEx) {
        if (!split_cex_set) {
          split_cex_set = new Node_List();
        }
        split_cex_set->push(use);
      }


      // Get "block" use is in
      // Find the input slot of 'use' that references 'old'.
      uint idx = 0;
      while( use->in(idx) != old ) idx++;
      Node *prev = use->is_CFG() ? use : get_ctrl(use);
      assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
      // Nodes with _idx >= new_counter are freshly made merge Regions:
      // walk their "old loop exit" input (slot 2); otherwise walk the idom.
      Node *cfg = prev->_idx >= new_counter
        ? prev->in(2)
        : idom(prev);
      if( use->is_Phi() )     // Phi use is in prior block
        cfg = prev->in(idx);  // NOT in block of Phi itself
      if (cfg->is_top()) {    // Use is dead?
        _igvn.replace_input_of(use, idx, C->top());
        continue;
      }

      // If use is referenced through control edge... (idx == 0)
      if (mode == IgnoreStripMined && idx == 0) {
        LoopNode *head = loop->_head->as_Loop();
        if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) {
          // That node is outside the inner loop, leave it outside the
          // outer loop as well to not confuse verification code.
          assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop");
          _igvn.replace_input_of(use, 0, head->outer_loop_exit());
          continue;
        }
      }

      // Walk up the dominator/merge chain until we re-enter the outer loop's
      // scope; 'prev' ends up as the loop-exit Region or projection to fix.
      while(!outer_loop->is_member(get_loop(cfg))) {
        prev = cfg;
        cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg);
      }
      // If the use occurs after merging several exits from the loop, then
      // old value must have dominated all those exits.  Since the same old
      // value was used on all those exits we did not need a Phi at this
      // merge point.  NOW we do need a Phi here.  Each loop exit value
      // is now merged with the peeled body exit; each exit gets its own
      // private Phi and those Phis need to be merged here.
      Node *phi;
      if( prev->is_Region() ) {
        if( idx == 0 ) {      // Updating control edge?
          phi = prev;         // Just use existing control
        } else {              // Else need a new Phi
          phi = PhiNode::make( prev, old );
          // Now recursively fix up the new uses of old!
          for( uint i = 1; i < prev->req(); i++ ) {
            worklist.push(phi); // Onto worklist once for each 'old' input
          }
        }
      } else {
        // Get new RegionNode merging old and new loop exits
        prev = old_new[prev->_idx];
        assert( prev, "just made this in step 7" );
        if( idx == 0) {       // Updating control edge?
          phi = prev;         // Just use existing control
        } else {              // Else need a new Phi
          // Make a new Phi merging data values properly
          phi = PhiNode::make( prev, old );
          phi->set_req( 1, nnn );
        }
      }
      // If inserting a new Phi, check for prior hits
      if( idx != 0 ) {
        Node *hit = _igvn.hash_find_insert(phi);
        if( hit == NULL ) {
          _igvn.register_new_node_with_optimizer(phi); // Register new phi
        } else {                                       // or
          // Remove the new phi from the graph and use the hit
          _igvn.remove_dead_node(phi);
          phi = hit;                                   // Use existing phi
        }
        set_ctrl(phi, prev);
      }
      // Make 'use' use the Phi instead of the old loop body exit value
      _igvn.replace_input_of(use, idx, phi);
      if( use->_idx >= new_counter ) { // If updating new phis
        // Not needed for correctness, but prevents a weak assert
        // in AddPNode from tripping (when we end up with different
        // base & derived Phis that will become the same after
        // IGVN does CSE).
        Node *hit = _igvn.hash_find_insert(use);
        if( hit )      // Go ahead and re-hash for hits.
          _igvn.replace_node( use, hit );
      }

      // If 'use' was in the loop-exit block, it now needs to be sunk
      // below the post-loop merge point.
      sink_use( use, prev );
    }
  }
}
1900
clone_outer_loop_helper(Node * n,const IdealLoopTree * loop,const IdealLoopTree * outer_loop,const Node_List & old_new,Unique_Node_List & wq,PhaseIdealLoop * phase,bool check_old_new)1901 static void clone_outer_loop_helper(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop,
1902 const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase,
1903 bool check_old_new) {
1904 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1905 Node* u = n->fast_out(j);
1906 assert(check_old_new || old_new[u->_idx] == NULL, "shouldn't have been cloned");
1907 if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == NULL)) {
1908 Node* c = phase->get_ctrl(u);
1909 IdealLoopTree* u_loop = phase->get_loop(c);
1910 assert(!loop->is_member(u_loop), "can be in outer loop or out of both loops only");
1911 if (outer_loop->is_member(u_loop)) {
1912 wq.push(u);
1913 }
1914 }
1915 }
1916 }
1917
// Clone the skeleton of the outer strip mined loop (outer LoopNode, tail,
// loop-end If and SafePoint) when cloning a strip mined loop, or detach the
// new loop head from the strip mined structure otherwise.  Also clones the
// data nodes that the outer loop's safepoint keeps alive, and re-assigns
// control for data nodes that were pessimistically placed in the outer loop.
void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
                                      IdealLoopTree* outer_loop, int dd, Node_List &old_new,
                                      Node_List& extra_data_nodes) {
  if (head->is_strip_mined() && mode != IgnoreStripMined) {
    CountedLoopNode* cl = head->as_CountedLoop();
    Node* l = cl->outer_loop();
    Node* tail = cl->outer_loop_tail();
    IfNode* le = cl->outer_loop_end();
    Node* sfpt = cl->outer_safepoint();
    CountedLoopEndNode* cle = cl->loopexit();
    CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
    CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
    Node* cle_out = cle->proj_out(false);

    Node* new_sfpt = NULL;
    Node* new_cle_out = cle_out->clone();
    old_new.map(cle_out->_idx, new_cle_out);
    if (mode == CloneIncludesStripMined) {
      // clone outer loop body
      Node* new_l = l->clone();
      Node* new_tail = tail->clone();
      IfNode* new_le = le->clone()->as_If();
      new_sfpt = sfpt->clone();

      // Hook the clones into loop and dominator info, dominator chain:
      // new_l -> new_cl ... new_cle -> new_cle_out -> new_sfpt -> new_le -> new_tail
      set_loop(new_l, outer_loop->_parent);
      set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
      set_loop(new_cle_out, outer_loop->_parent);
      set_idom(new_cle_out, new_cle, dd);
      set_loop(new_sfpt, outer_loop->_parent);
      set_idom(new_sfpt, new_cle_out, dd);
      set_loop(new_le, outer_loop->_parent);
      set_idom(new_le, new_sfpt, dd);
      set_loop(new_tail, outer_loop->_parent);
      set_idom(new_tail, new_le, dd);
      set_idom(new_cl, new_l, dd);

      old_new.map(l->_idx, new_l);
      old_new.map(tail->_idx, new_tail);
      old_new.map(le->_idx, new_le);
      old_new.map(sfpt->_idx, new_sfpt);

      // Wire the control edges of the cloned outer loop skeleton.
      new_l->set_req(LoopNode::LoopBackControl, new_tail);
      new_l->set_req(0, new_l);
      new_tail->set_req(0, new_le);
      new_le->set_req(0, new_sfpt);
      new_sfpt->set_req(0, new_cle_out);
      new_cle_out->set_req(0, new_cle);
      new_cl->set_req(LoopNode::EntryControl, new_l);

      _igvn.register_new_node_with_optimizer(new_l);
      _igvn.register_new_node_with_optimizer(new_tail);
      _igvn.register_new_node_with_optimizer(new_le);
    } else {
      // Not cloning the outer loop: the cloned inner loop loses its strip
      // mined status and is attached directly above the outer loop entry.
      Node *newhead = old_new[loop->_head->_idx];
      newhead->as_Loop()->clear_strip_mined();
      _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
      set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
    }
    // Look at data node that were assigned a control in the outer
    // loop: they are kept in the outer loop by the safepoint so start
    // from the safepoint node's inputs.
    // NOTE: this deliberately shadows the 'outer_loop' parameter; from here
    // on 'outer_loop' is the outer strip mined loop of the ORIGINAL loop.
    IdealLoopTree* outer_loop = get_loop(l);
    Node_Stack stack(2);
    stack.push(sfpt, 1);
    uint new_counter = C->unique();
    // Depth-first walk of the safepoint's input graph, cloning each node
    // whose control is in the outer loop (inputs cloned before users).
    while (stack.size() > 0) {
      Node* n = stack.node();
      uint i = stack.index();
      while (i < n->req() &&
             (n->in(i) == NULL ||
              !has_ctrl(n->in(i)) ||
              get_loop(get_ctrl(n->in(i))) != outer_loop ||
              (old_new[n->in(i)->_idx] != NULL && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
        i++;
      }
      if (i < n->req()) {
        stack.set_index(i+1);
        stack.push(n->in(i), 0);
      } else {
        assert(old_new[n->_idx] == NULL || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
        Node* m = n == sfpt ? new_sfpt : n->clone();
        if (m != NULL) {
          // Redirect the clone's inputs to the clones of its inputs.
          for (uint i = 0; i < n->req(); i++) {
            if (m->in(i) != NULL && old_new[m->in(i)->_idx] != NULL) {
              m->set_req(i, old_new[m->in(i)->_idx]);
            }
          }
        } else {
          assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
        }
        if (n != sfpt) {
          extra_data_nodes.push(n);
          _igvn.register_new_node_with_optimizer(m);
          assert(get_ctrl(n) == cle_out, "what other control?");
          set_ctrl(m, new_cle_out);
          old_new.map(n->_idx, m);
        }
        stack.pop();
      }
    }
    if (mode == CloneIncludesStripMined) {
      // Deferred until now so the loop above can tell clones from originals
      // by comparing node indexes against new_counter.
      _igvn.register_new_node_with_optimizer(new_sfpt);
      _igvn.register_new_node_with_optimizer(new_cle_out);
    }
    // Some other transformation may have pessimistically assign some
    // data nodes to the outer loop. Set their control so they are out
    // of the outer loop.
    ResourceMark rm;
    Unique_Node_List wq;
    for (uint i = 0; i < extra_data_nodes.size(); i++) {
      Node* old = extra_data_nodes.at(i);
      clone_outer_loop_helper(old, loop, outer_loop, old_new, wq, this, true);
    }
    Node* new_ctrl = cl->outer_loop_exit();
    assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
    for (uint i = 0; i < wq.size(); i++) {
      Node* n = wq.at(i);
      set_ctrl(n, new_ctrl);
      clone_outer_loop_helper(n, loop, outer_loop, old_new, wq, this, false);
    }
  } else {
    // Not strip mined (or strip mining ignored): just set the new head's
    // immediate dominator.
    Node *newhead = old_new[loop->_head->_idx];
    set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
  }
}
2043
2044 //------------------------------clone_loop-------------------------------------
2045 //
2046 // C L O N E A L O O P B O D Y
2047 //
2048 // This is the basic building block of the loop optimizations. It clones an
2049 // entire loop body. It makes an old_new loop body mapping; with this mapping
2050 // you can find the new-loop equivalent to an old-loop node. All new-loop
2051 // nodes are exactly equal to their old-loop counterparts, all edges are the
2052 // same. All exits from the old-loop now have a RegionNode that merges the
2053 // equivalent new-loop path. This is true even for the normal "loop-exit"
2054 // condition. All uses of loop-invariant old-loop values now come from (one
2055 // or more) Phis that merge their new-loop equivalents.
2056 //
2057 // This operation leaves the graph in an illegal state: there are two valid
2058 // control edges coming from the loop pre-header to both loop bodies. I'll
2059 // definitely have to hack the graph after running this transform.
2060 //
2061 // From this building block I will further edit edges to perform loop peeling
2062 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
2063 //
2064 // Parameter side_by_size_idom:
2065 // When side_by_size_idom is NULL, the dominator tree is constructed for
2066 // the clone loop to dominate the original. Used in construction of
2067 // pre-main-post loop sequence.
2068 // When nonnull, the clone and original are side-by-side, both are
2069 // dominated by the side_by_side_idom node. Used in construction of
2070 // unswitched loops.
void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
                                 CloneLoopMode mode, Node* side_by_side_idom) {

  LoopNode* head = loop->_head->as_Loop();
  head->verify_strip_mined(1);

  if (C->do_vector_loop() && PrintOpto) {
    const char* mname = C->method()->name()->as_quoted_ascii();
    if (mname != NULL) {
      tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
    }
  }

  CloneMap& cm = C->clone_map();
  // NOTE(review): 'dict' is not used in this method; kept for now.
  Dict* dict = cm.dict();
  if (C->do_vector_loop()) {
    // Track clone generations so SuperWord can relate clones to originals.
    cm.set_clone_idx(cm.max_gen()+1);
#ifndef PRODUCT
    if (PrintOpto) {
      tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
      loop->dump_head();
    }
#endif
  }

  // Step 1: Clone the loop body.  Make the old->new mapping.
  uint i;
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node *old = loop->_body.at(i);
    Node *nnn = old->clone();
    old_new.map( old->_idx, nnn );
    if (C->do_vector_loop()) {
      cm.verify_insert_and_clone(old, nnn, cm.clone_idx());
    }
    _igvn.register_new_node_with_optimizer(nnn);
  }

  IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;

  // Step 2: Fix the edges in the new body.  If the old input is outside the
  // loop use it.  If the old input is INside the loop, use the corresponding
  // new node instead.
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node *old = loop->_body.at(i);
    Node *nnn = old_new[old->_idx];
    // Fix CFG/Loop controlling the new node
    if (has_ctrl(old)) {
      set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
    } else {
      set_loop(nnn, outer_loop->_parent);
      if (old->outcnt() > 0) {
        set_idom( nnn, old_new[idom(old)->_idx], dd );
      }
    }
    // Correct edges to the new node
    for( uint j = 0; j < nnn->req(); j++ ) {
      Node *n = nnn->in(j);
      if( n ) {
        IdealLoopTree *old_in_loop = get_loop( has_ctrl(n) ? get_ctrl(n) : n );
        if( loop->is_member( old_in_loop ) )
          nnn->set_req(j, old_new[n->_idx]);
      }
    }
    _igvn.hash_find_insert(nnn);
  }

  Node_List extra_data_nodes; // data nodes in the outer strip mined loop
  clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);

  // Step 3: Now fix control uses.  Loop varying control uses have already
  // been fixed up (as part of all input edges in Step 2).  Loop invariant
  // control uses must be either an IfFalse or an IfTrue.  Make a merge
  // point to merge the old and new IfFalse/IfTrue nodes; make the use
  // refer to this.
  Node_List worklist;
  uint new_counter = C->unique();
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node* old = loop->_body.at(i);
    if( !old->is_CFG() ) continue;

    // Copy uses to a worklist, so I can munge the def-use info
    // with impunity.
    for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
      worklist.push(old->fast_out(j));

    while( worklist.size() ) {  // Visit all uses
      Node *use = worklist.pop();
      if (!has_node(use))  continue; // Ignore dead nodes
      IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
      if( !loop->is_member( use_loop ) && use->is_CFG() ) {
        // Both OLD and USE are CFG nodes here.
        assert( use->is_Proj(), "" );
        Node* nnn = old_new[old->_idx];

        Node* newuse = NULL;
        if (head->is_strip_mined() && mode != IgnoreStripMined) {
          // For the counted-loop-end exit of a strip mined loop, redirect
          // to the outer loop end's exit projection instead.
          CountedLoopNode* cl = head->as_CountedLoop();
          CountedLoopEndNode* cle = cl->loopexit();
          Node* cle_out = cle->proj_out_or_null(false);
          if (use == cle_out) {
            IfNode* le = cl->outer_loop_end();
            use = le->proj_out(false);
            use_loop = get_loop(use);
            if (mode == CloneIncludesStripMined) {
              nnn = old_new[le->_idx];
            } else {
              newuse = old_new[cle_out->_idx];
            }
          }
        }
        if (newuse == NULL) {
          newuse = use->clone();
        }

        // Clone the loop exit control projection
        if (C->do_vector_loop()) {
          cm.verify_insert_and_clone(use, newuse, cm.clone_idx());
        }
        newuse->set_req(0,nnn);
        _igvn.register_new_node_with_optimizer(newuse);
        set_loop(newuse, use_loop);
        set_idom(newuse, nnn, dom_depth(nnn) + 1 );

        // We need a Region to merge the exit from the peeled body and the
        // exit from the old loop body.
        RegionNode *r = new RegionNode(3);
        // Map the old use to the new merge point
        old_new.map( use->_idx, r );
        uint dd_r = MIN2(dom_depth(newuse),dom_depth(use));
        assert( dd_r >= dom_depth(dom_lca(newuse,use)), "" );

        // The original user of 'use' uses 'r' instead.
        for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
          Node* useuse = use->last_out(l);
          _igvn.rehash_node_delayed(useuse);
          uint uses_found = 0;
          if( useuse->in(0) == use ) {
            useuse->set_req(0, r);
            uses_found++;
            if( useuse->is_CFG() ) {
              // This is not a dom_depth > dd_r because when new
              // control flow is constructed by a loop opt, a node and
              // its dominator can end up at the same dom_depth
              assert(dom_depth(useuse) >= dd_r, "");
              set_idom(useuse, r, dom_depth(useuse));
            }
          }
          for( uint k = 1; k < useuse->req(); k++ ) {
            if( useuse->in(k) == use ) {
              useuse->set_req(k, r);
              uses_found++;
              if (useuse->is_Loop() && k == LoopNode::EntryControl) {
                // This is not a dom_depth > dd_r because when new
                // control flow is constructed by a loop opt, a node
                // and its dominator can end up at the same dom_depth
                assert(dom_depth(useuse) >= dd_r , "");
                set_idom(useuse, r, dom_depth(useuse));
              }
            }
          }
          l -= uses_found;    // we deleted 1 or more copies of this edge
        }

        // Now finish up 'r'
        r->set_req( 1, newuse );
        r->set_req( 2,    use );
        _igvn.register_new_node_with_optimizer(r);
        set_loop(r, use_loop);
        set_idom(r, !side_by_side_idom ? newuse->in(0) : side_by_side_idom, dd_r);
      } // End of if a loop-exit test
    }
  }

  // Step 4: If loop-invariant use is not control, it must be dominated by a
  // loop exit IfFalse/IfTrue.  Find "proper" loop exit.  Make a Region
  // there if needed.  Make a Phi there merging old and new used values.
  Node_List *split_if_set = NULL;
  Node_List *split_bool_set = NULL;
  Node_List *split_cex_set = NULL;
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node* old = loop->_body.at(i);
    clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
                                split_bool_set, split_cex_set, worklist, new_counter,
                                mode);
  }

  for (i = 0; i < extra_data_nodes.size(); i++) {
    Node* old = extra_data_nodes.at(i);
    clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
                                split_bool_set, split_cex_set, worklist, new_counter,
                                mode);
  }

  // Check for IFs that need splitting/cloning.  Happens if an IF outside of
  // the loop uses a condition set in the loop.  The original IF probably
  // takes control from one or more OLD Regions (which in turn get from NEW
  // Regions).  In any case, there will be a set of Phis for each merge point
  // from the IF up to where the original BOOL def exists the loop.
  if (split_if_set) {
    while (split_if_set->size()) {
      Node *iff = split_if_set->pop();
      if (iff->in(1)->is_Phi()) {
        Node *b = clone_iff(iff->in(1)->as_Phi(), loop);
        _igvn.replace_input_of(iff, 1, b);
      }
    }
  }
  if (split_bool_set) {
    while (split_bool_set->size()) {
      Node *b = split_bool_set->pop();
      Node *phi = b->in(1);
      assert(phi->is_Phi(), "");
      CmpNode *cmp = clone_bool((PhiNode*)phi, loop);
      _igvn.replace_input_of(b, 1, cmp);
    }
  }
  if (split_cex_set) {
    while (split_cex_set->size()) {
      Node *b = split_cex_set->pop();
      assert(b->in(0)->is_Region(), "");
      assert(b->in(1)->is_Phi(), "");
      assert(b->in(0)->in(0) == b->in(1)->in(0), "");
      split_up(b, b->in(0), NULL);
    }
  }

}
2298
2299
2300 //---------------------- stride_of_possible_iv -------------------------------------
2301 // Looks for an iff/bool/comp with one operand of the compare
2302 // being a cycle involving an add and a phi,
2303 // with an optional truncation (left-shift followed by a right-shift)
2304 // of the add. Returns zero if not an iv.
stride_of_possible_iv(Node * iff)2305 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
2306 Node* trunc1 = NULL;
2307 Node* trunc2 = NULL;
2308 const TypeInteger* ttype = NULL;
2309 if (!iff->is_If() || iff->in(1) == NULL || !iff->in(1)->is_Bool()) {
2310 return 0;
2311 }
2312 BoolNode* bl = iff->in(1)->as_Bool();
2313 Node* cmp = bl->in(1);
2314 if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) {
2315 return 0;
2316 }
2317 // Must have an invariant operand
2318 if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) {
2319 return 0;
2320 }
2321 Node* add2 = NULL;
2322 Node* cmp1 = cmp->in(1);
2323 if (cmp1->is_Phi()) {
2324 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) )))
2325 Node* phi = cmp1;
2326 for (uint i = 1; i < phi->req(); i++) {
2327 Node* in = phi->in(i);
2328 Node* add = CountedLoopNode::match_incr_with_optional_truncation(in,
2329 &trunc1, &trunc2, &ttype, T_INT);
2330 if (add && add->in(1) == phi) {
2331 add2 = add->in(2);
2332 break;
2333 }
2334 }
2335 } else {
2336 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) )))
2337 Node* addtrunc = cmp1;
2338 Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc,
2339 &trunc1, &trunc2, &ttype, T_INT);
2340 if (add && add->in(1)->is_Phi()) {
2341 Node* phi = add->in(1);
2342 for (uint i = 1; i < phi->req(); i++) {
2343 if (phi->in(i) == addtrunc) {
2344 add2 = add->in(2);
2345 break;
2346 }
2347 }
2348 }
2349 }
2350 if (add2 != NULL) {
2351 const TypeInt* add2t = _igvn.type(add2)->is_int();
2352 if (add2t->is_con()) {
2353 return add2t->get_con();
2354 }
2355 }
2356 return 0;
2357 }
2358
2359
2360 //---------------------- stay_in_loop -------------------------------------
2361 // Return the (unique) control output node that's in the loop (if it exists.)
stay_in_loop(Node * n,IdealLoopTree * loop)2362 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) {
2363 Node* unique = NULL;
2364 if (!n) return NULL;
2365 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2366 Node* use = n->fast_out(i);
2367 if (!has_ctrl(use) && loop->is_member(get_loop(use))) {
2368 if (unique != NULL) {
2369 return NULL;
2370 }
2371 unique = use;
2372 }
2373 }
2374 return unique;
2375 }
2376
2377 //------------------------------ register_node -------------------------------------
2378 // Utility to register node "n" with PhaseIdealLoop
register_node(Node * n,IdealLoopTree * loop,Node * pred,int ddepth)2379 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth) {
2380 _igvn.register_new_node_with_optimizer(n);
2381 loop->_body.push(n);
2382 if (n->is_CFG()) {
2383 set_loop(n, loop);
2384 set_idom(n, pred, ddepth);
2385 } else {
2386 set_ctrl(n, pred);
2387 }
2388 }
2389
2390 //------------------------------ proj_clone -------------------------------------
2391 // Utility to create an if-projection
proj_clone(ProjNode * p,IfNode * iff)2392 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) {
2393 ProjNode* c = p->clone()->as_Proj();
2394 c->set_req(0, iff);
2395 return c;
2396 }
2397
2398 //------------------------------ short_circuit_if -------------------------------------
2399 // Force the iff control output to be the live_proj
short_circuit_if(IfNode * iff,ProjNode * live_proj)2400 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) {
2401 guarantee(live_proj != NULL, "null projection");
2402 int proj_con = live_proj->_con;
2403 assert(proj_con == 0 || proj_con == 1, "false or true projection");
2404 Node *con = _igvn.intcon(proj_con);
2405 set_ctrl(con, C->root());
2406 if (iff) {
2407 iff->set_req(1, con);
2408 }
2409 return con;
2410 }
2411
2412 //------------------------------ insert_if_before_proj -------------------------------------
2413 // Insert a new if before an if projection (* - new node)
2414 //
2415 // before
2416 // if(test)
2417 // / \
2418 // v v
2419 // other-proj proj (arg)
2420 //
2421 // after
2422 // if(test)
2423 // / \
2424 // / v
2425 // | * proj-clone
2426 // v |
2427 // other-proj v
2428 // * new_if(relop(cmp[IU](left,right)))
2429 // / \
2430 // v v
2431 // * new-proj proj
2432 // (returned)
2433 //
// Insert a new If (testing left relop right, signed or unsigned) between
// 'proj' and its If, per the diagram above.  Returns the new If's exit
// projection; 'proj' is re-attached below the new If.  The statement order
// here matters: 'proj' is disconnected before cloning and re-attached after
// the new If exists.
ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) {
  IfNode* iff = proj->in(0)->as_If();
  IdealLoopTree *loop = get_loop(proj);
  ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
  int ddepth = dom_depth(proj);

  _igvn.rehash_node_delayed(iff);
  _igvn.rehash_node_delayed(proj);

  proj->set_req(0, NULL);  // temporary disconnect
  ProjNode* proj2 = proj_clone(proj, iff);
  register_node(proj2, loop, iff, ddepth);

  // Build the new test: signed or unsigned compare as requested.
  Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right);
  register_node(cmp, loop, proj2, ddepth);

  BoolNode* bol = new BoolNode(cmp, relop);
  register_node(bol, loop, proj2, ddepth);

  // Preserve the original If's flavor (plain If vs RangeCheck).
  int opcode = iff->Opcode();
  assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
  IfNode* new_if = (opcode == Op_If) ? new IfNode(proj2, bol, iff->_prob, iff->_fcnt):
    new RangeCheckNode(proj2, bol, iff->_prob, iff->_fcnt);
  register_node(new_if, loop, proj2, ddepth);

  proj->set_req(0, new_if); // reattach
  set_idom(proj, new_if, ddepth);

  // The exit goes through the clone of the other (non-'proj') projection.
  ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
  guarantee(new_exit != NULL, "null exit node");
  register_node(new_exit, get_loop(other_proj), new_if, ddepth);

  return new_exit;
}
2468
2469 //------------------------------ insert_region_before_proj -------------------------------------
2470 // Insert a region before an if projection (* - new node)
2471 //
2472 // before
2473 // if(test)
2474 // / |
2475 // v |
2476 // proj v
2477 // other-proj
2478 //
2479 // after
2480 // if(test)
2481 // / |
2482 // v |
2483 // * proj-clone v
2484 // | other-proj
2485 // v
2486 // * new-region
2487 // |
2488 // v
2489 // * dum_if
2490 // / \
2491 // v \
2492 // * dum-proj v
2493 // proj
2494 //
// Insert a Region (followed by a dummy always-taken If) between 'proj' and
// its If, per the diagram above.  Returns the new Region, which initially
// has the cloned projection as its only input; callers add more exit paths
// to it.  Statement order matters: 'proj' is disconnected before cloning
// and re-attached below the dummy If.
RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
  IfNode* iff = proj->in(0)->as_If();
  IdealLoopTree *loop = get_loop(proj);
  ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
  int ddepth = dom_depth(proj);

  _igvn.rehash_node_delayed(iff);
  _igvn.rehash_node_delayed(proj);

  proj->set_req(0, NULL);  // temporary disconnect
  ProjNode* proj2 = proj_clone(proj, iff);
  register_node(proj2, loop, iff, ddepth);

  RegionNode* reg = new RegionNode(2);
  reg->set_req(1, proj2);
  register_node(reg, loop, iff, ddepth);

  // Dummy If whose condition is a constant forcing the 'proj' path.
  IfNode* dum_if = new IfNode(reg, short_circuit_if(NULL, proj), iff->_prob, iff->_fcnt);
  register_node(dum_if, loop, reg, ddepth);

  proj->set_req(0, dum_if); // reattach
  set_idom(proj, dum_if, ddepth);

  // The dead projection of the dummy If keeps the graph well-formed.
  ProjNode* dum_proj = proj_clone(other_proj, dum_if);
  register_node(dum_proj, loop, dum_if, ddepth);

  return reg;
}
2523
2524 //------------------------------ insert_cmpi_loop_exit -------------------------------------
2525 // Clone a signed compare loop exit from an unsigned compare and
2526 // insert it before the unsigned cmp on the stay-in-loop path.
2527 // All new nodes inserted in the dominator tree between the original
2528 // if and it's projections. The original if test is replaced with
2529 // a constant to force the stay-in-loop path.
2530 //
2531 // This is done to make sure that the original if and it's projections
2532 // still dominate the same set of control nodes, that the ctrl() relation
2533 // from data nodes to them is preserved, and that their loop nesting is
2534 // preserved.
2535 //
2536 // before
2537 // if(i <u limit) unsigned compare loop exit
2538 // / |
2539 // v v
2540 // exit-proj stay-in-loop-proj
2541 //
2542 // after
2543 // if(stay-in-loop-const) original if
2544 // / |
2545 // / v
2546 // / if(i < limit) new signed test
2547 // / / |
2548 // / / v
2549 // / / if(i <u limit) new cloned unsigned test
2550 // / / / |
2551 // v v v |
2552 // region |
2553 // | |
2554 // dum-if |
2555 // / | |
2556 // ether | |
2557 // v v
2558 // exit-proj stay-in-loop-proj
2559 //
// See the comment block above for the before/after graph shapes.  Returns
// the new signed-compare If, or NULL if this exit does not match the
// expected (i <u limit) pattern or no induction-variable stride is found.
IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop) {
  const bool Signed   = true;
  const bool Unsigned = false;

  BoolNode* bol = if_cmpu->in(1)->as_Bool();
  if (bol->_test._test != BoolTest::lt) return NULL;
  CmpNode* cmpu = bol->in(1)->as_Cmp();
  if (cmpu->Opcode() != Op_CmpU) return NULL;
  int stride = stride_of_possible_iv(if_cmpu);
  if (stride == 0) return NULL;

  Node* lp_proj = stay_in_loop(if_cmpu, loop);
  guarantee(lp_proj != NULL, "null loop node");

  ProjNode* lp_continue = lp_proj->as_Proj();
  ProjNode* lp_exit     = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();
  if (!lp_exit->is_IfFalse()) {
    // The loop exit condition is (i <u limit) ==> (i >= 0 && i < limit).
    // We therefore can't add a single exit condition.
    return NULL;
  }
  // The loop exit condition is !(i <u limit) ==> (i < 0 || i >= limit).
  // Split out the exit condition (i < 0) for stride < 0 or (i >= limit) for stride > 0.
  Node* limit = NULL;
  if (stride > 0) {
    limit = cmpu->in(2);
  } else {
    // For a downward-counting iv the signed exit test is (i < 0).
    limit = _igvn.makecon(TypeInt::ZERO);
    set_ctrl(limit, C->root());
  }
  // Create a new region on the exit path
  RegionNode* reg = insert_region_before_proj(lp_exit);
  guarantee(reg != NULL, "null region node");

  // Clone the if-cmpu-true-false using a signed compare
  BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
  ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, limit, lp_continue);
  reg->add_req(cmpi_exit);

  // Clone the if-cmpu-true-false
  BoolTest::mask rel_u = bol->_test._test;
  ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue);
  reg->add_req(cmpu_exit);

  // Force original if to stay in loop.
  short_circuit_if(if_cmpu, lp_continue);

  return cmpi_exit->in(0)->as_If();
}
2609
2610 //------------------------------ remove_cmpi_loop_exit -------------------------------------
2611 // Remove a previously inserted signed compare loop exit.
remove_cmpi_loop_exit(IfNode * if_cmp,IdealLoopTree * loop)2612 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) {
2613 Node* lp_proj = stay_in_loop(if_cmp, loop);
2614 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI &&
2615 stay_in_loop(lp_proj, loop)->is_If() &&
2616 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu");
2617 Node *con = _igvn.makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO);
2618 set_ctrl(con, C->root());
2619 if_cmp->set_req(1, con);
2620 }
2621
2622 //------------------------------ scheduled_nodelist -------------------------------------
2623 // Create a post order schedule of nodes that are in the
2624 // "member" set. The list is returned in "sched".
2625 // The first node in "sched" is the loop head, followed by
2626 // nodes which have no inputs in the "member" set, and then
2627 // followed by the nodes that have an immediate input dependence
2628 // on a node in "sched".
scheduled_nodelist(IdealLoopTree * loop,VectorSet & member,Node_List & sched)2629 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) {
2630
2631 assert(member.test(loop->_head->_idx), "loop head must be in member set");
2632 VectorSet visited;
2633 Node_Stack nstack(loop->_body.size());
2634
2635 Node* n = loop->_head; // top of stack is cached in "n"
2636 uint idx = 0;
2637 visited.set(n->_idx);
2638
2639 // Initially push all with no inputs from within member set
2640 for(uint i = 0; i < loop->_body.size(); i++ ) {
2641 Node *elt = loop->_body.at(i);
2642 if (member.test(elt->_idx)) {
2643 bool found = false;
2644 for (uint j = 0; j < elt->req(); j++) {
2645 Node* def = elt->in(j);
2646 if (def && member.test(def->_idx) && def != elt) {
2647 found = true;
2648 break;
2649 }
2650 }
2651 if (!found && elt != loop->_head) {
2652 nstack.push(n, idx);
2653 n = elt;
2654 assert(!visited.test(n->_idx), "not seen yet");
2655 visited.set(n->_idx);
2656 }
2657 }
2658 }
2659
2660 // traverse out's that are in the member set
2661 while (true) {
2662 if (idx < n->outcnt()) {
2663 Node* use = n->raw_out(idx);
2664 idx++;
2665 if (!visited.test_set(use->_idx)) {
2666 if (member.test(use->_idx)) {
2667 nstack.push(n, idx);
2668 n = use;
2669 idx = 0;
2670 }
2671 }
2672 } else {
2673 // All outputs processed
2674 sched.push(n);
2675 if (nstack.is_empty()) break;
2676 n = nstack.node();
2677 idx = nstack.index();
2678 nstack.pop();
2679 }
2680 }
2681 }
2682
2683
2684 //------------------------------ has_use_in_set -------------------------------------
2685 // Has a use in the vector set
has_use_in_set(Node * n,VectorSet & vset)2686 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) {
2687 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2688 Node* use = n->fast_out(j);
2689 if (vset.test(use->_idx)) {
2690 return true;
2691 }
2692 }
2693 return false;
2694 }
2695
2696
2697 //------------------------------ has_use_internal_to_set -------------------------------------
2698 // Has use internal to the vector set (ie. not in a phi at the loop head)
has_use_internal_to_set(Node * n,VectorSet & vset,IdealLoopTree * loop)2699 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) {
2700 Node* head = loop->_head;
2701 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2702 Node* use = n->fast_out(j);
2703 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) {
2704 return true;
2705 }
2706 }
2707 return false;
2708 }
2709
2710
2711 //------------------------------ clone_for_use_outside_loop -------------------------------------
2712 // clone "n" for uses that are outside of loop
int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
  // Returns the number of clones created.
  int cloned = 0;
  assert(worklist.size() == 0, "should be empty");
  // Collect all uses of "n" whose control lies outside "loop".
  // A use with multiple edges to "n" appears once per edge, so every
  // edge gets rewired by a separate pass of the loop below.
  for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
    Node* use = n->fast_out(j);
    if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) {
      worklist.push(use);
    }
  }
  while( worklist.size() ) {
    Node *use = worklist.pop();
    // Skip dead or already-removed uses.
    if (!has_node(use) || use->in(0) == C->top()) continue;
    uint j;
    for (j = 0; j < use->req(); j++) {
      if (use->in(j) == n) break;
    }
    assert(j < use->req(), "must be there");

    // clone "n" and insert it between the inputs of "n" and the use outside the loop
    Node* n_clone = n->clone();
    _igvn.replace_input_of(use, j, n_clone);
    cloned++;
    Node* use_c;
    if (!use->is_Phi()) {
      use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
    } else {
      // Use in a phi is considered a use in the associated predecessor block
      use_c = use->in(0)->in(j);
    }
    // The clone lives at the (out-of-loop) control of its use.
    set_ctrl(n_clone, use_c);
    assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
    get_loop(use_c)->_body.push(n_clone);
    _igvn.register_new_node_with_optimizer(n_clone);
#ifndef PRODUCT
    if (TracePartialPeeling) {
      tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
    }
#endif
  }
  return cloned;
}
2754
2755
2756 //------------------------------ clone_for_special_use_inside_loop -------------------------------------
2757 // clone "n" for special uses that are in the not_peeled region.
2758 // If these def-uses occur in separate blocks, the code generator
2759 // marks the method as not compilable. For example, if a "BoolNode"
2760 // is in a different basic block than the "IfNode" that uses it, then
2761 // the compilation is aborted in the code generator.
void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                                  VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) {
  // Phis and loads are never cloned here (loads may be anti-dependent on stores).
  if (n->is_Phi() || n->is_Load()) {
    return;
  }
  assert(worklist.size() == 0, "should be empty");
  // Collect the not-peeled If/CMove/Bool users that consume "n" via input 1;
  // these are the def-use pairs that must stay in the same basic block.
  for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
    Node* use = n->fast_out(j);
    if ( not_peel.test(use->_idx) &&
         (use->is_If() || use->is_CMove() || use->is_Bool()) &&
         use->in(1) == n)  {
      worklist.push(use);
    }
  }
  if (worklist.size() > 0) {
    // clone "n" and insert it between inputs of "n" and the use
    // A single clone serves all collected uses; it is placed in the
    // not-peeled region and recorded on the sink list.
    Node* n_clone = n->clone();
    loop->_body.push(n_clone);
    _igvn.register_new_node_with_optimizer(n_clone);
    set_ctrl(n_clone, get_ctrl(n));
    sink_list.push(n_clone);
    not_peel.set(n_clone->_idx);
#ifndef PRODUCT
    if (TracePartialPeeling) {
      tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
    }
#endif
    while( worklist.size() ) {
      Node *use = worklist.pop();
      _igvn.rehash_node_delayed(use);
      // Redirect every edge from "n" to the clone.
      for (uint j = 1; j < use->req(); j++) {
        if (use->in(j) == n) {
          use->set_req(j, n_clone);
        }
      }
    }
  }
}
2800
2801
2802 //------------------------------ insert_phi_for_loop -------------------------------------
2803 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
insert_phi_for_loop(Node * use,uint idx,Node * lp_entry_val,Node * back_edge_val,LoopNode * lp)2804 void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) {
2805 Node *phi = PhiNode::make(lp, back_edge_val);
2806 phi->set_req(LoopNode::EntryControl, lp_entry_val);
2807 // Use existing phi if it already exists
2808 Node *hit = _igvn.hash_find_insert(phi);
2809 if( hit == NULL ) {
2810 _igvn.register_new_node_with_optimizer(phi);
2811 set_ctrl(phi, lp);
2812 } else {
2813 // Remove the new phi from the graph and use the hit
2814 _igvn.remove_dead_node(phi);
2815 phi = hit;
2816 }
2817 _igvn.replace_input_of(use, idx, phi);
2818 }
2819
2820 #ifdef ASSERT
2821 //------------------------------ is_valid_loop_partition -------------------------------------
2822 // Validate the loop partition sets: peel and not_peel
is_valid_loop_partition(IdealLoopTree * loop,VectorSet & peel,Node_List & peel_list,VectorSet & not_peel)2823 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list,
2824 VectorSet& not_peel ) {
2825 uint i;
2826 // Check that peel_list entries are in the peel set
2827 for (i = 0; i < peel_list.size(); i++) {
2828 if (!peel.test(peel_list.at(i)->_idx)) {
2829 return false;
2830 }
2831 }
2832 // Check at loop members are in one of peel set or not_peel set
2833 for (i = 0; i < loop->_body.size(); i++ ) {
2834 Node *def = loop->_body.at(i);
2835 uint di = def->_idx;
2836 // Check that peel set elements are in peel_list
2837 if (peel.test(di)) {
2838 if (not_peel.test(di)) {
2839 return false;
2840 }
2841 // Must be in peel_list also
2842 bool found = false;
2843 for (uint j = 0; j < peel_list.size(); j++) {
2844 if (peel_list.at(j)->_idx == di) {
2845 found = true;
2846 break;
2847 }
2848 }
2849 if (!found) {
2850 return false;
2851 }
2852 } else if (not_peel.test(di)) {
2853 if (peel.test(di)) {
2854 return false;
2855 }
2856 } else {
2857 return false;
2858 }
2859 }
2860 return true;
2861 }
2862
2863 //------------------------------ is_valid_clone_loop_exit_use -------------------------------------
2864 // Ensure a use outside of loop is of the right form
is_valid_clone_loop_exit_use(IdealLoopTree * loop,Node * use,uint exit_idx)2865 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) {
2866 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
2867 return (use->is_Phi() &&
2868 use_c->is_Region() && use_c->req() == 3 &&
2869 (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
2870 use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
2871 use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
2872 loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
2873 }
2874
2875 //------------------------------ is_valid_clone_loop_form -------------------------------------
2876 // Ensure that all uses outside of loop are of the right form
is_valid_clone_loop_form(IdealLoopTree * loop,Node_List & peel_list,uint orig_exit_idx,uint clone_exit_idx)2877 bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
2878 uint orig_exit_idx, uint clone_exit_idx) {
2879 uint len = peel_list.size();
2880 for (uint i = 0; i < len; i++) {
2881 Node *def = peel_list.at(i);
2882
2883 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
2884 Node *use = def->fast_out(j);
2885 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
2886 if (!loop->is_member(get_loop(use_c))) {
2887 // use is not in the loop, check for correct structure
2888 if (use->in(0) == def) {
2889 // Okay
2890 } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
2891 return false;
2892 }
2893 }
2894 }
2895 }
2896 return true;
2897 }
2898 #endif
2899
2900 //------------------------------ partial_peel -------------------------------------
2901 // Partially peel (aka loop rotation) the top portion of a loop (called
2902 // the peel section below) by cloning it and placing one copy just before
2903 // the new loop head and the other copy at the bottom of the new loop.
2904 //
2905 // before after where it came from
2906 //
2907 // stmt1 stmt1
2908 // loop: stmt2 clone
2909 // stmt2 if condA goto exitA clone
2910 // if condA goto exitA new_loop: new
2911 // stmt3 stmt3 clone
2912 // if !condB goto loop if condB goto exitB clone
2913 // exitB: stmt2 orig
2914 // stmt4 if !condA goto new_loop orig
2915 // exitA: goto exitA
2916 // exitB:
2917 // stmt4
2918 // exitA:
2919 //
2920 // Step 1: find the cut point: an exit test on probable
2921 // induction variable.
2922 // Step 2: schedule (with cloning) operations in the peel
2923 // section that can be executed after the cut into
2924 // the section that is not peeled. This may need
2925 // to clone operations into exit blocks. For
2926 // instance, a reference to A[i] in the not-peel
2927 // section and a reference to B[i] in an exit block
2928 // may cause a left-shift of i by 2 to be placed
2929 // in the peel block. This step will clone the left
2930 // shift into the exit block and sink the left shift
2931 // from the peel to the not-peel section.
2932 // Step 3: clone the loop, retarget the control, and insert
2933 // phis for values that are live across the new loop
2934 // head. This is very dependent on the graph structure
2935 // from clone_loop. It creates region nodes for
2936 // exit control and associated phi nodes for values
2937 // flow out of the loop through that exit. The region
2938 // node is dominated by the clone's control projection.
2939 // So the clone's peel section is placed before the
//        new loop head, and the clone's not-peel section
//        forms the top part of the new loop. The original
//        peel section forms the tail of the new loop.
2943 // Step 4: update the dominator tree and recompute the
2944 // dominator depth.
2945 //
2946 // orig
2947 //
2948 // stmt1
2949 // |
2950 // v
2951 // loop predicate
2952 // |
2953 // v
2954 // loop<----+
2955 // | |
2956 // stmt2 |
2957 // | |
2958 // v |
2959 // ifA |
2960 // / | |
2961 // v v |
2962 // false true ^ <-- last_peel
2963 // / | |
2964 // / ===|==cut |
2965 // / stmt3 | <-- first_not_peel
2966 // / | |
2967 // | v |
2968 // v ifB |
2969 // exitA: / \ |
2970 // / \ |
2971 // v v |
2972 // false true |
2973 // / \ |
2974 // / ----+
2975 // |
2976 // v
2977 // exitB:
2978 // stmt4
2979 //
2980 //
2981 // after clone loop
2982 //
2983 // stmt1
2984 // |
2985 // v
2986 // loop predicate
2987 // / \
2988 // clone / \ orig
2989 // / \
2990 // / \
2991 // v v
2992 // +---->loop loop<----+
2993 // | | | |
2994 // | stmt2 stmt2 |
2995 // | | | |
2996 // | v v |
2997 // | ifA ifA |
2998 // | | \ / | |
2999 // | v v v v |
3000 // ^ true false false true ^ <-- last_peel
3001 // | | ^ \ / | |
3002 // | cut==|== \ \ / ===|==cut |
3003 // | stmt3 \ \ / stmt3 | <-- first_not_peel
3004 // | | dom | | | |
3005 // | v \ 1v v2 v |
3006 // | ifB regionA ifB |
3007 // | / \ | / \ |
3008 // | / \ v / \ |
3009 // | v v exitA: v v |
3010 // | true false false true |
3011 // | / ^ \ / \ |
3012 // +---- \ \ / ----+
3013 // dom \ /
3014 // \ 1v v2
3015 // regionB
3016 // |
3017 // v
3018 // exitB:
3019 // stmt4
3020 //
3021 //
3022 // after partial peel
3023 //
3024 // stmt1
3025 // |
3026 // v
3027 // loop predicate
3028 // /
3029 // clone / orig
3030 // / TOP
3031 // / \
3032 // v v
3033 // TOP->loop loop----+
3034 // | | |
3035 // stmt2 stmt2 |
3036 // | | |
3037 // v v |
3038 // ifA ifA |
3039 // | \ / | |
3040 // v v v v |
3041 // true false false true | <-- last_peel
3042 // | ^ \ / +------|---+
3043 // +->newloop \ \ / === ==cut | |
3044 // | stmt3 \ \ / TOP | |
3045 // | | dom | | stmt3 | | <-- first_not_peel
3046 // | v \ 1v v2 v | |
3047 // | ifB regionA ifB ^ v
3048 // | / \ | / \ | |
3049 // | / \ v / \ | |
3050 // | v v exitA: v v | |
3051 // | true false false true | |
3052 // | / ^ \ / \ | |
3053 // | | \ \ / v | |
3054 // | | dom \ / TOP | |
3055 // | | \ 1v v2 | |
3056 // ^ v regionB | |
3057 // | | | | |
3058 // | | v ^ v
3059 // | | exitB: | |
3060 // | | stmt4 | |
3061 // | +------------>-----------------+ |
3062 // | |
3063 // +-----------------<---------------------+
3064 //
3065 //
3066 // final graph
3067 //
3068 // stmt1
3069 // |
3070 // v
3071 // loop predicate
3072 // |
3073 // v
3074 // stmt2 clone
3075 // |
3076 // v
3077 // ........> ifA clone
3078 // : / |
3079 // dom / |
3080 // : v v
3081 // : false true
3082 // : | |
3083 // : | v
3084 // : | newloop<-----+
3085 // : | | |
3086 // : | stmt3 clone |
3087 // : | | |
3088 // : | v |
3089 // : | ifB |
3090 // : | / \ |
3091 // : | v v |
3092 // : | false true |
3093 // : | | | |
3094 // : | v stmt2 |
3095 // : | exitB: | |
3096 // : | stmt4 v |
3097 // : | ifA orig |
3098 // : | / \ |
3099 // : | / \ |
3100 // : | v v |
3101 // : | false true |
3102 // : | / \ |
3103 // : v v -----+
3104 // RegionA
3105 // |
3106 // v
3107 // exitA
3108 //
bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
  // Returns true if the graph was changed (peel performed, or clones made
  // before bailing out); false if nothing happened.

  assert(!loop->_head->is_CountedLoop(), "Non-counted loop only");
  if (!loop->_head->is_Loop()) {
    return false;
  }
  LoopNode *head = loop->_head->as_Loop();

  // Do not retry a loop that was already peeled or where peeling failed.
  if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) {
    return false;
  }

  // Check for complex exit control
  for (uint ii = 0; ii < loop->_body.size(); ii++) {
    Node *n = loop->_body.at(ii);
    int opc = n->Opcode();
    if (n->is_Call() ||
        opc == Op_Catch ||
        opc == Op_CatchProj ||
        opc == Op_Jump ||
        opc == Op_JumpProj) {
#ifndef PRODUCT
      if (TracePartialPeeling) {
        tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
      }
#endif
      return false;
    }
  }

  int dd = dom_depth(head);

  // Step 1: find cut point

  // Walk up dominators to loop head looking for first loop exit
  // which is executed on every path thru loop.
  IfNode *peel_if = NULL;       // candidate signed (CmpI) exit test
  IfNode *peel_if_cmpu = NULL;  // candidate unsigned (CmpU) exit test

  Node *iff = loop->tail();
  while (iff != head) {
    if (iff->is_If()) {
      Node *ctrl = get_ctrl(iff->in(1));
      if (ctrl->is_top()) return false; // Dead test on live IF.
      // If loop-varying exit-test, check for induction variable
      if (loop->is_member(get_loop(ctrl)) &&
          loop->is_loop_exit(iff) &&
          is_possible_iv_test(iff)) {
        Node* cmp = iff->in(1)->in(1);
        if (cmp->Opcode() == Op_CmpI) {
          peel_if = iff->as_If();
        } else {
          assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU");
          peel_if_cmpu = iff->as_If();
        }
      }
    }
    iff = idom(iff);
  }

  // Prefer signed compare over unsigned compare.
  IfNode* new_peel_if = NULL;
  if (peel_if == NULL) {
    if (!PartialPeelAtUnsignedTests || peel_if_cmpu == NULL) {
      return false;   // No peel point found
    }
    // Derive a signed exit test from the unsigned one (see insert_cmpi_loop_exit).
    new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop);
    if (new_peel_if == NULL) {
      return false;   // No peel point found
    }
    peel_if = new_peel_if;
  }
  Node* last_peel        = stay_in_loop(peel_if, loop);
  Node* first_not_peeled = stay_in_loop(last_peel, loop);
  if (first_not_peeled == NULL || first_not_peeled == head) {
    return false;
  }

#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("PartialPeel  ");
    loop->dump_head();
  }

  if (TracePartialPeeling) {
    tty->print_cr("before partial peel one iteration");
    Node_List wl;
    Node* t = head->in(2);
    while (true) {
      wl.push(t);
      if (t == head) break;
      t = idom(t);
    }
    while (wl.size() > 0) {
      Node* tt = wl.pop();
      tt->dump();
      if (tt == last_peel) tty->print_cr("-- cut --");
    }
  }
#endif
  VectorSet peel;
  VectorSet not_peel;
  Node_List peel_list;
  Node_List worklist;
  Node_List sink_list;

  // Bail out early if the cloning would blow the node budget.
  uint estimate = loop->est_loop_clone_sz(1);
  if (exceeding_node_budget(estimate)) {
    return false;
  }

  // Set of cfg nodes to peel are those that are executable from
  // the head through last_peel.
  assert(worklist.size() == 0, "should be empty");
  worklist.push(head);
  peel.set(head->_idx);
  while (worklist.size() > 0) {
    Node *n = worklist.pop();
    if (n != last_peel) {
      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* use = n->fast_out(j);
        if (use->is_CFG() &&
            loop->is_member(get_loop(use)) &&
            !peel.test_set(use->_idx)) {
          worklist.push(use);
        }
      }
    }
  }

  // Set of non-cfg nodes to peel are those that are control
  // dependent on the cfg nodes.
  for (uint i = 0; i < loop->_body.size(); i++) {
    Node *n = loop->_body.at(i);
    Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
    if (peel.test(n_c->_idx)) {
      peel.set(n->_idx);
    } else {
      not_peel.set(n->_idx);
    }
  }

  // Step 2: move operations from the peeled section down into the
  //         not-peeled section

  // Get a post order schedule of nodes in the peel region
  // Result in right-most operand.
  scheduled_nodelist(loop, peel, peel_list);

  assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");

  // For future check for too many new phis
  uint old_phi_cnt = 0;
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* use = head->fast_out(j);
    if (use->is_Phi()) old_phi_cnt++;
  }

#ifndef PRODUCT
  if (TracePartialPeeling) {
    tty->print_cr("\npeeled list");
  }
#endif

  // Evacuate nodes in peel region into the not_peeled region if possible
  // Note: new_phi_cnt counts prospective new phis, used only for limiting.
  uint new_phi_cnt = 0;
  uint cloned_for_outside_use = 0;
  for (uint i = 0; i < peel_list.size();) {
    Node* n = peel_list.at(i);
#ifndef PRODUCT
  if (TracePartialPeeling) n->dump();
#endif
    bool incr = true;
    if (!n->is_CFG()) {
      if (has_use_in_set(n, not_peel)) {
        // If not used internal to the peeled region,
        // move "n" from peeled to not_peeled region.
        if (!has_use_internal_to_set(n, peel, loop)) {
          // if not pinned and not a load (which maybe anti-dependent on a store)
          // and not a CMove (Matcher expects only bool->cmove).
          if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove()) {
            cloned_for_outside_use += clone_for_use_outside_loop(loop, n, worklist);
            sink_list.push(n);
            peel.remove(n->_idx);
            not_peel.set(n->_idx);
            peel_list.remove(i);
            incr = false;  // peel_list shrank, re-examine index i
#ifndef PRODUCT
            if (TracePartialPeeling) {
              tty->print_cr("sink to not_peeled region: %d newbb: %d",
                            n->_idx, get_ctrl(n)->_idx);
            }
#endif
          }
        } else {
          // Otherwise check for special def-use cases that span
          // the peel/not_peel boundary such as bool->if
          clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist);
          new_phi_cnt++;
        }
      }
    }
    if (incr) i++;
  }

  estimate += cloned_for_outside_use + new_phi_cnt;
  bool exceed_node_budget = !may_require_nodes(estimate);
  bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta;

  if (exceed_node_budget || exceed_phi_limit) {
#ifndef PRODUCT
    if (TracePartialPeeling) {
      tty->print_cr("\nToo many new phis: %d  old %d new cmpi: %c",
                    new_phi_cnt, old_phi_cnt, new_peel_if != NULL?'T':'F');
    }
#endif
    // Undo the signed exit test inserted for the unsigned compare case.
    if (new_peel_if != NULL) {
      remove_cmpi_loop_exit(new_peel_if, loop);
    }
    // Inhibit more partial peeling on this loop
    assert(!head->is_partial_peel_loop(), "not partial peeled");
    head->mark_partial_peel_failed();
    if (cloned_for_outside_use > 0) {
      // Terminate this round of loop opts because
      // the graph outside this loop was changed.
      C->set_major_progress();
      return true;
    }
    return false;
  }

  // Step 3: clone loop, retarget control, and insert new phis

  // Create new loop head for new phis and to hang
  // the nodes being moved (sinked) from the peel region.
  LoopNode* new_head = new LoopNode(last_peel, last_peel);
  new_head->set_unswitch_count(head->unswitch_count()); // Preserve
  _igvn.register_new_node_with_optimizer(new_head);
  assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
  _igvn.replace_input_of(first_not_peeled, 0, new_head);
  set_loop(new_head, loop);
  loop->_body.push(new_head);
  not_peel.set(new_head->_idx);
  set_idom(new_head, last_peel, dom_depth(first_not_peeled));
  set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));

  // Sunk nodes now live at the new loop head.
  while (sink_list.size() > 0) {
    Node* n = sink_list.pop();
    set_ctrl(n, new_head);
  }

  assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");

  clone_loop(loop, old_new, dd, IgnoreStripMined);

  const uint clone_exit_idx = 1;
  const uint orig_exit_idx  = 2;
  assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop");

  Node* head_clone             = old_new[head->_idx];
  LoopNode* new_head_clone     = old_new[new_head->_idx]->as_Loop();
  Node* orig_tail_clone        = head_clone->in(2);

  // Add phi if "def" node is in peel set and "use" is not

  for (uint i = 0; i < peel_list.size(); i++) {
    Node *def  = peel_list.at(i);
    if (!def->is_CFG()) {
      for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
        Node *use = def->fast_out(j);
        if (has_node(use) && use->in(0) != C->top() &&
            (!peel.test(use->_idx) ||
             (use->is_Phi() && use->in(0) == head)) ) {
          worklist.push(use);
        }
      }
      while( worklist.size() ) {
        Node *use = worklist.pop();
        for (uint j = 1; j < use->req(); j++) {
          Node* n = use->in(j);
          if (n == def) {

            // "def" is in peel set, "use" is not in peel set
            // or "use" is in the entry boundary (a phi) of the peel set

            Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;

            if ( loop->is_member(get_loop( use_c )) ) {
              // use is in loop
              if (old_new[use->_idx] != NULL) { // null for dead code
                Node* use_clone = old_new[use->_idx];
                _igvn.replace_input_of(use, j, C->top());
                insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
              }
            } else {
              assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
              // use is not in the loop, check if the live range includes the cut
              Node* lp_if = use_c->in(orig_exit_idx)->in(0);
              if (not_peel.test(lp_if->_idx)) {
                assert(j == orig_exit_idx, "use from original loop");
                insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
              }
            }
          }
        }
      }
    }
  }

  // Step 3b: retarget control

  // Redirect control to the new loop head if a cloned node in
  // the not_peeled region has control that points into the peeled region.
  // This is necessary because the cloned peeled region will be outside
  // the loop.
  //                            from    to
  //          cloned-peeled    <---+
  //    new_head_clone:            |    <--+
  //          cloned-not_peeled  in(0)    in(0)
  //          orig-peeled

  for (uint i = 0; i < loop->_body.size(); i++) {
    Node *n = loop->_body.at(i);
    if (!n->is_CFG()           && n->in(0) != NULL        &&
        not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
      Node* n_clone = old_new[n->_idx];
      _igvn.replace_input_of(n_clone, 0, new_head_clone);
    }
  }

  // Backedge of the surviving new_head (the clone) is original last_peel
  _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);

  // Cut first node in original not_peel set
  _igvn.rehash_node_delayed(new_head);                     // Multiple edge updates:
  new_head->set_req(LoopNode::EntryControl,    C->top());  //   use rehash_node_delayed / set_req instead of
  new_head->set_req(LoopNode::LoopBackControl, C->top());  //   multiple replace_input_of calls

  // Copy head_clone back-branch info to original head
  // and remove original head's loop entry and
  // clone head's back-branch
  _igvn.rehash_node_delayed(head); // Multiple edge updates
  head->set_req(LoopNode::EntryControl,    head_clone->in(LoopNode::LoopBackControl));
  head->set_req(LoopNode::LoopBackControl, C->top());
  _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());

  // Similarly modify the phis
  for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
    Node* use = head->fast_out(k);
    if (use->is_Phi() && use->outcnt() > 0) {
      Node* use_clone = old_new[use->_idx];
      _igvn.rehash_node_delayed(use); // Multiple edge updates
      use->set_req(LoopNode::EntryControl,    use_clone->in(LoopNode::LoopBackControl));
      use->set_req(LoopNode::LoopBackControl, C->top());
      _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
    }
  }

  // Step 4: update dominator tree and dominator depth

  set_idom(head, orig_tail_clone, dd);
  recompute_dom_depth();

  // Inhibit more partial peeling on this loop
  new_head_clone->set_partial_peel_loop();
  C->set_major_progress();
  loop->record_for_igvn();

#ifndef PRODUCT
  if (TracePartialPeeling) {
    tty->print_cr("\nafter partial peel one iteration");
    Node_List wl;
    Node* t = last_peel;
    while (true) {
      wl.push(t);
      if (t == head_clone) break;
      t = idom(t);
    }
    while (wl.size() > 0) {
      Node* tt = wl.pop();
      if (tt == head) tty->print_cr("orig head");
      else if (tt == new_head_clone) tty->print_cr("new head");
      else if (tt == head_clone) tty->print_cr("clone head");
      tt->dump();
    }
  }
#endif
  return true;
}
3498
//------------------------------reorg_offsets----------------------------------
// Reorganize offset computations to lower register pressure.  Mostly
// prevent loop-fallout uses of the pre-incremented trip counter (which are
// then alive with the post-incremented trip counter forcing an extra
// register move)
void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) {
  // Perform it only for canonical counted loops.
  // Loop's shape could be messed up by iteration_split_impl.
  if (!loop->_head->is_CountedLoop())
    return;
  if (!loop->_head->as_Loop()->is_valid_counted_loop(T_INT))
    return;

  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  CountedLoopEndNode *cle = cl->loopexit();
  Node *exit = cle->proj_out(false);  // fall-out (false) projection of the loop-end test
  Node *phi = cl->phi();              // pre-incremented trip-counter phi

  // Check for the special case when using the pre-incremented trip-counter on
  // the fall-out path (forces the pre-incremented and post-incremented trip
  // counter to be live at the same time). Fix this by adjusting to use the
  // post-increment trip counter.

  // Restart the DU scan from scratch whenever an edge is rewired, since the
  // fast-out iterator is invalidated by the set_req below.
  bool progress = true;
  while (progress) {
    progress = false;
    for (DUIterator_Fast imax, i = phi->fast_outs(imax); i < imax; i++) {
      Node* use = phi->fast_out(i); // User of trip-counter
      if (!has_ctrl(use)) continue; // skip pinned/CFG users; only data uses have ctrl
      Node *u_ctrl = get_ctrl(use);
      if (use->is_Phi()) {
        // A phi has no single control of its own; use the dominator-LCA of
        // the region inputs on the paths that actually carry the trip counter.
        u_ctrl = NULL;
        for (uint j = 1; j < use->req(); j++)
          if (use->in(j) == phi)
            u_ctrl = dom_lca(u_ctrl, use->in(0)->in(j));
      }
      IdealLoopTree *u_loop = get_loop(u_ctrl);
      // Look for loop-invariant use
      if (u_loop == loop) continue;
      if (loop->is_member(u_loop)) continue;
      // Check that use is live out the bottom. Assuming the trip-counter
      // update is right at the bottom, uses of the loop middle are ok.
      if (dom_lca(exit, u_ctrl) != exit) continue;
      // Hit! Refactor use to use the post-incremented tripcounter.
      // Compute a post-increment tripcounter.
      Node* c = exit;
      if (cl->is_strip_mined()) {
        // For a strip-mined loop, a use outside the outer strip-mined loop
        // must hang off the outer loop's exit rather than the inner exit.
        IdealLoopTree* outer_loop = get_loop(cl->outer_loop());
        if (!outer_loop->is_member(u_loop)) {
          c = cl->outer_loop_exit();
        }
      }
      // Rebuild the pre-increment value as (incr - stride), which equals phi,
      // so that only the post-increment value (incr) stays live past the exit.
      // The Opaque2 wrapper hides incr from IGVN so the subtraction is not
      // immediately folded back into the phi it was meant to replace.
      Node *opaq = new Opaque2Node(C, cle->incr());
      register_new_node(opaq, c);
      Node *neg_stride = _igvn.intcon(-cle->stride_con());
      set_ctrl(neg_stride, C->root()); // constants are pinned at the root
      Node *post = new AddINode(opaq, neg_stride);
      register_new_node(post, c);
      // Multiple input edges of 'use' may change; notify IGVN once, up front.
      _igvn.rehash_node_delayed(use);
      for (uint j = 1; j < use->req(); j++) {
        if (use->in(j) == phi)
          use->set_req(j, post);
      }
      // Since DU info changed, rerun loop
      progress = true;
      break;
    }
  }

}
3569